From 7df905910b86721a6ee3a3b6c916a4f8e27d0aa7 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 25 Jun 2020 22:00:08 -0700 Subject: [PATCH] fix: update retry configs (#44) --- .coveragerc | 16 + .flake8 | 18 + .github/ISSUE_TEMPLATE/bug_report.md | 3 +- .gitignore | 2 + .kokoro/publish-docs.sh | 2 - .kokoro/release.sh | 2 - .kokoro/samples/lint/common.cfg | 34 + .kokoro/samples/lint/continuous.cfg | 6 + .kokoro/samples/lint/periodic.cfg | 6 + .kokoro/samples/lint/presubmit.cfg | 6 + .kokoro/samples/python3.6/common.cfg | 34 + .kokoro/samples/python3.6/continuous.cfg | 7 + .kokoro/samples/python3.6/periodic.cfg | 6 + .kokoro/samples/python3.6/presubmit.cfg | 6 + .kokoro/samples/python3.7/common.cfg | 34 + .kokoro/samples/python3.7/continuous.cfg | 6 + .kokoro/samples/python3.7/periodic.cfg | 6 + .kokoro/samples/python3.7/presubmit.cfg | 6 + .kokoro/samples/python3.8/common.cfg | 34 + .kokoro/samples/python3.8/continuous.cfg | 6 + .kokoro/samples/python3.8/periodic.cfg | 6 + .kokoro/samples/python3.8/presubmit.cfg | 6 + .kokoro/test-samples.sh | 104 + CONTRIBUTING.rst | 15 +- MANIFEST.in | 19 + docs/conf.py | 9 +- docs/index.rst | 2 + docs/multiprocessing.rst | 7 + google/cloud/automl.py | 7 +- google/cloud/automl_v1/__init__.py | 19 +- .../cloud/automl_v1/gapic/auto_ml_client.py | 459 ++--- .../automl_v1/gapic/auto_ml_client_config.py | 117 +- google/cloud/automl_v1/gapic/enums.py | 16 +- .../gapic/prediction_service_client.py | 203 +- .../gapic/prediction_service_client_config.py | 38 +- .../transports/auto_ml_grpc_transport.py | 96 +- .../prediction_service_grpc_transport.py | 70 +- .../automl_v1/proto/annotation_payload.proto | 3 +- .../automl_v1/proto/annotation_payload_pb2.py | 43 +- .../automl_v1/proto/annotation_spec.proto | 13 +- .../automl_v1/proto/annotation_spec_pb2.py | 53 +- .../automl_v1/proto/classification.proto | 25 +- .../automl_v1/proto/classification_pb2.py | 168 +- google/cloud/automl_v1/proto/data_items.proto | 38 +- .../cloud/automl_v1/proto/data_items_pb2.py | 253 ++- google/cloud/automl_v1/proto/dataset.proto | 8 +- google/cloud/automl_v1/proto/dataset_pb2.py | 83 +- google/cloud/automl_v1/proto/detection.proto | 9 +- google/cloud/automl_v1/proto/detection_pb2.py | 105 +- google/cloud/automl_v1/proto/geometry.proto | 3 +- google/cloud/automl_v1/proto/geometry_pb2.py | 47 +- google/cloud/automl_v1/proto/image.proto | 64 +- google/cloud/automl_v1/proto/image_pb2.py | 173 +- google/cloud/automl_v1/proto/io.proto | 708 ++++++- google/cloud/automl_v1/proto/io_pb2.py | 1737 ++++++++--------- google/cloud/automl_v1/proto/model.proto | 5 +- .../automl_v1/proto/model_evaluation.proto | 36 +- .../automl_v1/proto/model_evaluation_pb2.py | 83 +- google/cloud/automl_v1/proto/model_pb2.py | 98 +- google/cloud/automl_v1/proto/operations.proto | 3 +- .../cloud/automl_v1/proto/operations_pb2.py | 245 +-- .../automl_v1/proto/prediction_service.proto | 306 ++- .../automl_v1/proto/prediction_service_pb2.py | 396 ++-- .../proto/prediction_service_pb2_grpc.py | 70 +- google/cloud/automl_v1/proto/service.proto | 370 ++-- google/cloud/automl_v1/proto/service_pb2.py | 766 ++++---- .../cloud/automl_v1/proto/service_pb2_grpc.py | 16 +- google/cloud/automl_v1/proto/text.proto | 28 +- .../automl_v1/proto/text_extraction.proto | 3 +- .../automl_v1/proto/text_extraction_pb2.py | 63 +- google/cloud/automl_v1/proto/text_pb2.py | 109 +- .../cloud/automl_v1/proto/text_segment.proto | 3 +- .../cloud/automl_v1/proto/text_segment_pb2.py | 37 +- .../automl_v1/proto/text_sentiment.proto | 11 
+- .../automl_v1/proto/text_sentiment_pb2.py | 68 +- .../cloud/automl_v1/proto/translation.proto | 11 +- .../cloud/automl_v1/proto/translation_pb2.py | 92 +- google/cloud/automl_v1beta1/__init__.py | 16 +- .../automl_v1beta1/gapic/auto_ml_client.py | 1264 ++++++------ .../gapic/auto_ml_client_config.py | 177 +- google/cloud/automl_v1beta1/gapic/enums.py | 38 +- .../gapic/prediction_service_client.py | 115 +- .../gapic/prediction_service_client_config.py | 34 +- .../transports/auto_ml_grpc_transport.py | 255 +-- .../prediction_service_grpc_transport.py | 8 +- .../proto/annotation_payload.proto | 3 +- .../proto/annotation_payload_pb2.py | 46 +- .../proto/annotation_spec.proto | 13 +- .../proto/annotation_spec_pb2.py | 53 +- .../automl_v1beta1/proto/classification.proto | 14 +- .../proto/classification_pb2.py | 196 +- .../automl_v1beta1/proto/column_spec.proto | 9 +- .../automl_v1beta1/proto/column_spec_pb2.py | 89 +- .../automl_v1beta1/proto/data_items.proto | 40 +- .../automl_v1beta1/proto/data_items_pb2.py | 323 +-- .../automl_v1beta1/proto/data_stats.proto | 3 +- .../automl_v1beta1/proto/data_stats_pb2.py | 299 +-- .../automl_v1beta1/proto/data_types.proto | 3 +- .../automl_v1beta1/proto/data_types_pb2.py | 133 +- .../cloud/automl_v1beta1/proto/dataset.proto | 9 +- .../cloud/automl_v1beta1/proto/dataset_pb2.py | 68 +- .../automl_v1beta1/proto/detection.proto | 3 +- .../automl_v1beta1/proto/detection_pb2.py | 170 +- .../cloud/automl_v1beta1/proto/geometry.proto | 3 +- .../automl_v1beta1/proto/geometry_pb2.py | 47 +- google/cloud/automl_v1beta1/proto/image.proto | 65 +- .../cloud/automl_v1beta1/proto/image_pb2.py | 191 +- google/cloud/automl_v1beta1/proto/io.proto | 3 +- google/cloud/automl_v1beta1/proto/io_pb2.py | 1544 +++++++-------- google/cloud/automl_v1beta1/proto/model.proto | 9 +- .../proto/model_evaluation.proto | 9 +- .../proto/model_evaluation_pb2.py | 86 +- .../cloud/automl_v1beta1/proto/model_pb2.py | 81 +- .../automl_v1beta1/proto/operations.proto | 31 +- .../automl_v1beta1/proto/operations_pb2.py | 347 ++-- .../proto/prediction_service.proto | 79 +- .../proto/prediction_service_pb2.py | 398 ++-- .../proto/prediction_service_pb2_grpc.py | 8 +- .../cloud/automl_v1beta1/proto/ranges.proto | 3 +- .../cloud/automl_v1beta1/proto/ranges_pb2.py | 28 +- .../automl_v1beta1/proto/regression.proto | 3 +- .../automl_v1beta1/proto/regression_pb2.py | 31 +- .../cloud/automl_v1beta1/proto/service.proto | 284 ++- .../cloud/automl_v1beta1/proto/service_pb2.py | 1045 +++++----- .../automl_v1beta1/proto/service_pb2_grpc.py | 3 +- .../automl_v1beta1/proto/table_spec.proto | 9 +- .../automl_v1beta1/proto/table_spec_pb2.py | 76 +- .../cloud/automl_v1beta1/proto/tables.proto | 15 +- .../cloud/automl_v1beta1/proto/tables_pb2.py | 332 ++-- .../cloud/automl_v1beta1/proto/temporal.proto | 3 +- .../automl_v1beta1/proto/temporal_pb2.py | 28 +- google/cloud/automl_v1beta1/proto/text.proto | 28 +- .../proto/text_extraction.proto | 3 +- .../proto/text_extraction_pb2.py | 63 +- google/cloud/automl_v1beta1/proto/text_pb2.py | 109 +- .../automl_v1beta1/proto/text_segment.proto | 3 +- .../automl_v1beta1/proto/text_segment_pb2.py | 37 +- .../automl_v1beta1/proto/text_sentiment.proto | 3 +- .../proto/text_sentiment_pb2.py | 66 +- .../automl_v1beta1/proto/translation.proto | 8 +- .../automl_v1beta1/proto/translation_pb2.py | 104 +- google/cloud/automl_v1beta1/proto/video.proto | 3 +- .../cloud/automl_v1beta1/proto/video_pb2.py | 66 +- noxfile.py | 33 +- scripts/decrypt-secrets.sh | 33 + 
scripts/readme-gen/readme_gen.py | 66 + scripts/readme-gen/templates/README.tmpl.rst | 87 + scripts/readme-gen/templates/auth.tmpl.rst | 9 + .../templates/auth_api_key.tmpl.rst | 14 + .../templates/install_deps.tmpl.rst | 29 + .../templates/install_portaudio.tmpl.rst | 35 + setup.cfg | 16 + synth.metadata | 27 +- synth.py | 37 +- testing/.gitignore | 3 + tests/unit/gapic/v1/test_auto_ml_client_v1.py | 204 +- .../v1beta1/test_auto_ml_client_v1beta1.py | 860 ++++---- .../test_prediction_service_client_v1beta1.py | 11 +- 158 files changed, 10207 insertions(+), 7677 deletions(-) create mode 100644 .kokoro/samples/lint/common.cfg create mode 100644 .kokoro/samples/lint/continuous.cfg create mode 100644 .kokoro/samples/lint/periodic.cfg create mode 100644 .kokoro/samples/lint/presubmit.cfg create mode 100644 .kokoro/samples/python3.6/common.cfg create mode 100644 .kokoro/samples/python3.6/continuous.cfg create mode 100644 .kokoro/samples/python3.6/periodic.cfg create mode 100644 .kokoro/samples/python3.6/presubmit.cfg create mode 100644 .kokoro/samples/python3.7/common.cfg create mode 100644 .kokoro/samples/python3.7/continuous.cfg create mode 100644 .kokoro/samples/python3.7/periodic.cfg create mode 100644 .kokoro/samples/python3.7/presubmit.cfg create mode 100644 .kokoro/samples/python3.8/common.cfg create mode 100644 .kokoro/samples/python3.8/continuous.cfg create mode 100644 .kokoro/samples/python3.8/periodic.cfg create mode 100644 .kokoro/samples/python3.8/presubmit.cfg create mode 100755 .kokoro/test-samples.sh create mode 100644 docs/multiprocessing.rst create mode 100755 scripts/decrypt-secrets.sh create mode 100644 scripts/readme-gen/readme_gen.py create mode 100644 scripts/readme-gen/templates/README.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth.tmpl.rst create mode 100644 scripts/readme-gen/templates/auth_api_key.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_deps.tmpl.rst create mode 100644 scripts/readme-gen/templates/install_portaudio.tmpl.rst create mode 100644 testing/.gitignore diff --git a/.coveragerc b/.coveragerc index b178b094..dd39c854 100644 --- a/.coveragerc +++ b/.coveragerc @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [run] branch = True diff --git a/.flake8 b/.flake8 index 0268ecc9..ed931638 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! [flake8] ignore = E203, E266, E501, W503 @@ -5,6 +21,8 @@ exclude = # Exclude generated code. **/proto/** **/gapic/** + **/services/** + **/types/** *_pb2.py # Standard linting exemptions. diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 3127a03a..4836b9eb 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -11,8 +11,7 @@ Thanks for stopping by to let us know something could be better! Please run down the following list and make sure you've tried the usual "quick fixes": - Search the issues already opened: https://github.com/googleapis/python-automl/issues - - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-python - - Search StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform+python + - Search StackOverflow: https://stackoverflow.com/questions/tagged/google-cloud-platform+python If you are still having issues, please be sure to include as much information as possible: diff --git a/.gitignore b/.gitignore index 3fb06e09..b87e1ed5 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ dist build eggs +.eggs parts bin var @@ -49,6 +50,7 @@ bigquery/docs/generated # Virtual environment env/ coverage.xml +sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index b157f117..7aff8a9e 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Disable buffering, so that the logs stream through. diff --git a/.kokoro/release.sh b/.kokoro/release.sh index ba265923..6f8265f3 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -#!/bin/bash - set -eo pipefail # Start the releasetool reporter diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg new file mode 100644 index 00000000..c6585101 --- /dev/null +++ b/.kokoro/samples/lint/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "lint" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/lint/continuous.cfg b/.kokoro/samples/lint/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/periodic.cfg b/.kokoro/samples/lint/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/lint/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/lint/presubmit.cfg b/.kokoro/samples/lint/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/lint/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg new file mode 100644 index 00000000..a67eebd6 --- /dev/null +++ b/.kokoro/samples/python3.6/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.6" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.6/continuous.cfg b/.kokoro/samples/python3.6/continuous.cfg new file mode 100644 index 00000000..7218af14 --- /dev/null +++ b/.kokoro/samples/python3.6/continuous.cfg @@ -0,0 +1,7 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.6/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/presubmit.cfg b/.kokoro/samples/python3.6/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.6/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg new file mode 100644 index 00000000..6fa14a09 --- /dev/null +++ b/.kokoro/samples/python3.7/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.7" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.7/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.7/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.7/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.7/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg new file mode 100644 index 00000000..a74006d4 --- /dev/null +++ b/.kokoro/samples/python3.8/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.8" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-automl/.kokoro/trampoline.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.8/continuous.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg new file mode 100644 index 00000000..50fec964 --- /dev/null +++ b/.kokoro/samples/python3.8/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.8/presubmit.cfg new file mode 100644 index 00000000..a1c8d975 --- /dev/null +++ b/.kokoro/samples/python3.8/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh new file mode 100755 index 00000000..14c39db4 --- /dev/null +++ b/.kokoro/test-samples.sh @@ -0,0 +1,104 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost comment to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-automl + +# Run periodic samples tests at latest release +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + LATEST_RELEASE=$(git describe --abbrev=0 --tags) + git checkout $LATEST_RELEASE +fi + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets acessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. 
+gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. + file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the Build Cop Bot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop. + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" \ No newline at end of file diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 1e9731cb..6d6bd916 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: 2.7, - 3.5, 3.6, and 3.7 on both UNIX and Windows. + 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -214,26 +214,18 @@ We support: - `Python 3.5`_ - `Python 3.6`_ - `Python 3.7`_ +- `Python 3.8`_ .. _Python 3.5: https://docs.python.org/3.5/ .. _Python 3.6: https://docs.python.org/3.6/ .. _Python 3.7: https://docs.python.org/3.7/ +.. _Python 3.8: https://docs.python.org/3.8/ Supported versions can be found in our ``noxfile.py`` `config`_. .. _config: https://github.com/googleapis/python-automl/blob/master/noxfile.py -We explicitly decided not to support `Python 2.5`_ due to `decreased usage`_ -and lack of continuous integration `support`_. - -.. _Python 2.5: https://docs.python.org/2.5/ -.. _decreased usage: https://caremad.io/2013/10/a-look-at-pypi-downloads/ -.. _support: https://blog.travis-ci.com/2013-11-18-upcoming-build-environment-updates/ - -We have `dropped 2.6`_ as a supported version as well since Python 2.6 is no -longer supported by the core development team. - Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020. We also explicitly decided to support Python 3 beginning with version @@ -247,7 +239,6 @@ We also explicitly decided to support Python 3 beginning with version .. _prominent: https://docs.djangoproject.com/en/1.9/faq/install/#what-python-version-can-i-use-with-django .. _projects: http://flask.pocoo.org/docs/0.10/python3/ .. _Unicode literal support: https://www.python.org/dev/peps/pep-0414/ -.. 
_dropped 2.6: https://github.com/googleapis/google-cloud-python/issues/995 ********** Versioning diff --git a/MANIFEST.in b/MANIFEST.in index cd011be2..e9e29d12 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,25 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! include README.rst LICENSE recursive-include google *.json *.proto recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ + +# Exclude scripts for samples readmegen +prune scripts/readme-gen \ No newline at end of file diff --git a/docs/conf.py b/docs/conf.py index 0b6aebeb..a2e8ab1e 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -38,21 +38,18 @@ "sphinx.ext.napoleon", "sphinx.ext.todo", "sphinx.ext.viewcode", + "recommonmark", ] # autodoc/autosummary flags autoclass_content = "both" -autodoc_default_flags = ["members"] +autodoc_default_options = {"members": True} autosummary_generate = True # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] @@ -340,7 +337,7 @@ intersphinx_mapping = { "python": ("http://python.readthedocs.org/en/latest/", None), "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,), "grpc": ("https://grpc.io/grpc/python/", None), } diff --git a/docs/index.rst b/docs/index.rst index 90c2bfd5..5473e0d7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,5 +1,7 @@ .. include:: README.rst +.. include:: multiprocessing.rst + This package includes clients for multiple versions of the Cloud AutoML API. By default, you will get ``v1``, the latest stable version. diff --git a/docs/multiprocessing.rst b/docs/multiprocessing.rst new file mode 100644 index 00000000..1cb29d4c --- /dev/null +++ b/docs/multiprocessing.rst @@ -0,0 +1,7 @@ +.. note:: + + Because this client uses :mod:`grpcio` library, it is safe to + share instances across threads. In multiprocessing scenarios, the best + practice is to create client instances *after* the invocation of + :func:`os.fork` by :class:`multiprocessing.Pool` or + :class:`multiprocessing.Process`. 
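The new ``docs/multiprocessing.rst`` note above states the guidance in words only. A minimal sketch of that pattern, not part of this patch and using placeholder project IDs, in which each worker builds its own ``AutoMlClient`` after the fork so no gRPC channel is shared with the parent process::

    # Hypothetical illustration of the docs/multiprocessing.rst guidance;
    # the project IDs below are placeholders.
    import multiprocessing

    from google.cloud import automl_v1


    def list_dataset_names(project_id):
        # Create the client inside the worker, i.e. after os.fork() has been
        # invoked by multiprocessing.Pool, so the underlying gRPC channel is
        # not shared across processes.
        client = automl_v1.AutoMlClient()
        parent = "projects/{}/locations/us-central1".format(project_id)
        return [dataset.name for dataset in client.list_datasets(parent)]


    if __name__ == "__main__":
        pool = multiprocessing.Pool(processes=2)
        try:
            print(pool.map(list_dataset_names, ["project-a", "project-b"]))
        finally:
            pool.close()
            pool.join()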
diff --git a/google/cloud/automl.py b/google/cloud/automl.py index c1dc4ee7..9dc44cde 100644 --- a/google/cloud/automl.py +++ b/google/cloud/automl.py @@ -23,4 +23,9 @@ from google.cloud.automl_v1 import types -__all__ = ("enums", "types", "AutoMlClient", "PredictionServiceClient") +__all__ = ( + "enums", + "types", + "PredictionServiceClient", + "AutoMlClient", +) diff --git a/google/cloud/automl_v1/__init__.py b/google/cloud/automl_v1/__init__.py index a663f191..3c9ade66 100644 --- a/google/cloud/automl_v1/__init__.py +++ b/google/cloud/automl_v1/__init__.py @@ -27,21 +27,26 @@ if sys.version_info[:2] == (2, 7): message = ( - "A future version of this library will drop support for Python 2.7." - "More details about Python 2 support for Google Cloud Client Libraries" + "A future version of this library will drop support for Python 2.7. " + "More details about Python 2 support for Google Cloud Client Libraries " "can be found at https://cloud.google.com/python/docs/python2-sunset/" ) warnings.warn(message, DeprecationWarning) -class AutoMlClient(auto_ml_client.AutoMlClient): - __doc__ = auto_ml_client.AutoMlClient.__doc__ +class PredictionServiceClient(prediction_service_client.PredictionServiceClient): + __doc__ = prediction_service_client.PredictionServiceClient.__doc__ enums = enums -class PredictionServiceClient(prediction_service_client.PredictionServiceClient): - __doc__ = prediction_service_client.PredictionServiceClient.__doc__ +class AutoMlClient(auto_ml_client.AutoMlClient): + __doc__ = auto_ml_client.AutoMlClient.__doc__ enums = enums -__all__ = ("enums", "types", "AutoMlClient", "PredictionServiceClient") +__all__ = ( + "enums", + "types", + "PredictionServiceClient", + "AutoMlClient", +) diff --git a/google/cloud/automl_v1/gapic/auto_ml_client.py b/google/cloud/automl_v1/gapic/auto_ml_client.py index c1f6ed3d..a870b6bf 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client.py @@ -38,12 +38,15 @@ from google.cloud.automl_v1.gapic import enums from google.cloud.automl_v1.gapic.transports import auto_ml_grpc_transport from google.cloud.automl_v1.proto import annotation_spec_pb2 +from google.cloud.automl_v1.proto import data_items_pb2 from google.cloud.automl_v1.proto import dataset_pb2 from google.cloud.automl_v1.proto import image_pb2 from google.cloud.automl_v1.proto import io_pb2 from google.cloud.automl_v1.proto import model_evaluation_pb2 from google.cloud.automl_v1.proto import model_pb2 from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 +from google.cloud.automl_v1.proto import prediction_service_pb2 +from google.cloud.automl_v1.proto import prediction_service_pb2_grpc from google.cloud.automl_v1.proto import service_pb2 from google.cloud.automl_v1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 @@ -51,7 +54,7 @@ from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version class AutoMlClient(object): @@ -69,7 +72,7 @@ class AutoMlClient(object): Currently the only supported ``location_id`` is "us-central1". On any input that is documented to expect a string parameter in - snake\_case or kebab-case, either of those cases is accepted. + snake_case or kebab-case, either of those cases is accepted. 
""" SERVICE_ADDRESS = "automl.googleapis.com:443" @@ -237,12 +240,12 @@ def __init__( self.transport = transport else: self.transport = auto_ml_grpc_transport.AutoMlGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -253,7 +256,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -296,8 +299,8 @@ def create_dataset( >>> metadata = response.metadata() Args: - parent (str): The resource name of the project to create the dataset for. - dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): The dataset to create. + parent (str): Required. The resource name of the project to create the dataset for. + dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): Required. The dataset to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.Dataset` @@ -331,7 +334,7 @@ def create_dataset( client_info=self._client_info, ) - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) + request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) if metadata is None: metadata = [] metadata = list(metadata) @@ -355,89 +358,6 @@ def create_dataset( metadata_type=proto_operations_pb2.OperationMetadata, ) - def update_dataset( - self, - dataset, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a dataset. - - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_dataset(dataset, update_mask) - - Args: - dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): The dataset which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Dataset` - update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. 
- google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "update_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("dataset.name", dataset.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def get_dataset( self, name, @@ -458,7 +378,7 @@ def get_dataset( >>> response = client.get_dataset(name) Args: - name (str): The resource name of the dataset to retrieve. + name (str): Required. The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -489,7 +409,7 @@ def get_dataset( client_info=self._client_info, ) - request = service_pb2.GetDatasetRequest(name=name) + request = service_pb2.GetDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -541,15 +461,15 @@ def list_datasets( ... pass Args: - parent (str): The resource name of the project from which to list datasets. + parent (str): Required. The resource name of the project from which to list datasets. filter_ (str): An expression for filtering the results of the request. - ``dataset_metadata`` - for existence of the case (e.g. - image\_classification\_dataset\_metadata:\*). Some examples of using - the filter are: + ````image_classification_dataset_metadata````). Some examples of using the + filter are: - ``translation_dataset_metadata:*`` --> The dataset has - translation\_dataset\_metadata. + translation_dataset_metadata. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -589,7 +509,7 @@ def list_datasets( ) request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -619,6 +539,89 @@ def list_datasets( ) return iterator + def update_dataset( + self, + dataset, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a dataset. + + Example: + >>> from google.cloud import automl_v1 + >>> + >>> client = automl_v1.AutoMlClient() + >>> + >>> # TODO: Initialize `dataset`: + >>> dataset = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.update_dataset(dataset, update_mask) + + Args: + dataset (Union[dict, ~google.cloud.automl_v1.types.Dataset]): Required. The dataset which replaces the resource on the server. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.Dataset` + update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.automl_v1.types.Dataset` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "update_dataset" not in self._inner_api_calls: + self._inner_api_calls[ + "update_dataset" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_dataset, + default_retry=self._method_configs["UpdateDataset"].retry, + default_timeout=self._method_configs["UpdateDataset"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("dataset.name", dataset.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_dataset"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + def delete_dataset( self, name, @@ -627,8 +630,8 @@ def delete_dataset( metadata=None, ): """ - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: @@ -650,7 +653,7 @@ def delete_dataset( >>> metadata = response.metadata() Args: - name (str): The resource name of the dataset to delete. + name (str): Required. The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -681,7 +684,7 @@ def delete_dataset( client_info=self._client_info, ) - request = service_pb2.DeleteDatasetRequest(name=name) + request = service_pb2.DeleteDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -714,7 +717,14 @@ def import_data( metadata=None, ): """ - Imports data into a dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. 
Example: >>> from google.cloud import automl_v1 @@ -775,7 +785,7 @@ def import_data( client_info=self._client_info, ) - request = service_pb2.ImportDataRequest(name=name, input_config=input_config) + request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -808,8 +818,8 @@ def export_data( metadata=None, ): """ - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1 @@ -868,7 +878,7 @@ def export_data( client_info=self._client_info, ) - request = service_pb2.ExportDataRequest(name=name, output_config=output_config) + request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -912,7 +922,7 @@ def get_annotation_spec( >>> response = client.get_annotation_spec(name) Args: - name (str): The resource name of the annotation spec to retrieve. + name (str): Required. The resource name of the annotation spec to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -943,7 +953,7 @@ def get_annotation_spec( client_info=self._client_info, ) - request = service_pb2.GetAnnotationSpecRequest(name=name) + request = service_pb2.GetAnnotationSpecRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -997,8 +1007,8 @@ def create_model( >>> metadata = response.metadata() Args: - parent (str): Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1.types.Model]): The model to create. + parent (str): Required. Resource name of the parent project where the model is being created. + model (Union[dict, ~google.cloud.automl_v1.types.Model]): Required. The model to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.Model` @@ -1032,7 +1042,7 @@ def create_model( client_info=self._client_info, ) - request = service_pb2.CreateModelRequest(parent=parent, model=model) + request = service_pb2.CreateModelRequest(parent=parent, model=model,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1076,7 +1086,7 @@ def get_model( >>> response = client.get_model(name) Args: - name (str): Resource name of the model. + name (str): Required. Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1107,7 +1117,7 @@ def get_model( client_info=self._client_info, ) - request = service_pb2.GetModelRequest(name=name) + request = service_pb2.GetModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1125,87 +1135,6 @@ def get_model( request, retry=retry, timeout=timeout, metadata=metadata ) - def update_model( - self, - model, - update_mask, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a model. 
- - Example: - >>> from google.cloud import automl_v1 - >>> - >>> client = automl_v1.AutoMlClient() - >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> # TODO: Initialize `update_mask`: - >>> update_mask = {} - >>> - >>> response = client.update_model(model, update_mask) - - Args: - model (Union[dict, ~google.cloud.automl_v1.types.Model]): The model which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.Model` - update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1.types.Model` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. - if "update_model" not in self._inner_api_calls: - self._inner_api_calls[ - "update_model" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_model, - default_retry=self._method_configs["UpdateModel"].retry, - default_timeout=self._method_configs["UpdateModel"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateModelRequest(model=model, update_mask=update_mask) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("model.name", model.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def list_models( self, parent, @@ -1240,16 +1169,16 @@ def list_models( ... pass Args: - parent (str): Resource name of the project, from which to list the models. + parent (str): Required. Resource name of the project, from which to list the models. filter_ (str): An expression for filtering the results of the request. - ``model_metadata`` - for existence of the case (e.g. - image\_classification\_model\_metadata:\*). + ````video_classification_model_metadata:*````). - ``dataset_id`` - for = or !=. Some examples of using the filter are: - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. + image_classification_model_metadata. - ``dataset_id=5`` --> The model was created from a dataset with ID 5. 
page_size (int): The maximum number of resources contained in the @@ -1291,7 +1220,7 @@ def list_models( ) request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1329,9 +1258,9 @@ def delete_model( metadata=None, ): """ - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Example: >>> from google.cloud import automl_v1 @@ -1352,7 +1281,7 @@ def delete_model( >>> metadata = response.metadata() Args: - name (str): Resource name of the model being deleted. + name (str): Required. Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1383,7 +1312,7 @@ def delete_model( client_info=self._client_info, ) - request = service_pb2.DeleteModelRequest(name=name) + request = service_pb2.DeleteModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1407,6 +1336,87 @@ def delete_model( metadata_type=proto_operations_pb2.OperationMetadata, ) + def update_model( + self, + model, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a model. + + Example: + >>> from google.cloud import automl_v1 + >>> + >>> client = automl_v1.AutoMlClient() + >>> + >>> # TODO: Initialize `model`: + >>> model = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.update_model(model, update_mask) + + Args: + model (Union[dict, ~google.cloud.automl_v1.types.Model]): Required. The model which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.Model` + update_mask (Union[dict, ~google.cloud.automl_v1.types.FieldMask]): Required. The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.automl_v1.types.Model` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "update_model" not in self._inner_api_calls: + self._inner_api_calls[ + "update_model" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_model, + default_retry=self._method_configs["UpdateModel"].retry, + default_timeout=self._method_configs["UpdateModel"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.UpdateModelRequest(model=model, update_mask=update_mask,) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("model.name", model.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_model"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + def deploy_model( self, name, @@ -1417,15 +1427,16 @@ def deploy_model( metadata=None, ): """ - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing ``node_number``) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. Returns an empty response in the ``response`` field when it completes. @@ -1448,7 +1459,7 @@ def deploy_model( >>> metadata = response.metadata() Args: - name (str): Resource name of the model to deploy. + name (str): Required. Resource name of the model to deploy. image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. If a dict is provided, it must be of the same form as the protobuf @@ -1533,8 +1544,8 @@ def undeploy_model( Undeploys a model. If the model is not deployed this method has no effect. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. Returns an empty response in the ``response`` field when it completes. @@ -1557,7 +1568,7 @@ def undeploy_model( >>> metadata = response.metadata() Args: - name (str): Resource name of the model to undeploy. + name (str): Required. Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1588,7 +1599,7 @@ def undeploy_model( client_info=self._client_info, ) - request = service_pb2.UndeployModelRequest(name=name) + request = service_pb2.UndeployModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1621,9 +1632,9 @@ def export_model( metadata=None, ): """ - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in ``ModelExportOutputConfig``. 
+ Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in ``ModelExportOutputConfig``. Returns an empty response in the ``response`` field when it completes. @@ -1684,7 +1695,9 @@ def export_model( client_info=self._client_info, ) - request = service_pb2.ExportModelRequest(name=name, output_config=output_config) + request = service_pb2.ExportModelRequest( + name=name, output_config=output_config, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -1728,7 +1741,7 @@ def get_model_evaluation( >>> response = client.get_model_evaluation(name) Args: - name (str): Resource name for the model evaluation. + name (str): Required. Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1759,7 +1772,7 @@ def get_model_evaluation( client_info=self._client_info, ) - request = service_pb2.GetModelEvaluationRequest(name=name) + request = service_pb2.GetModelEvaluationRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1814,10 +1827,10 @@ def list_model_evaluations( ... pass Args: - parent (str): Resource name of the model to list the model evaluations for. + parent (str): Required. Resource name of the model to list the model evaluations for. If modelId is set as "-", this will list model evaluations from across all models of the parent location. - filter_ (str): An expression for filtering the results of the request. + filter_ (str): Required. An expression for filtering the results of the request. - ``annotation_spec_id`` - for =, != or existence. See example below for the last. 
@@ -1867,7 +1880,7 @@ def list_model_evaluations( ) request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] diff --git a/google/cloud/automl_v1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1/gapic/auto_ml_client_config.py index e54353d4..0c89b881 100644 --- a/google/cloud/automl_v1/gapic/auto_ml_client_config.py +++ b/google/cloud/automl_v1/gapic/auto_ml_client_config.py @@ -2,110 +2,129 @@ "interfaces": { "google.cloud.automl.v1.AutoMl": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], + "retry_policy_1_codes": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], + "no_retry_codes": [], + "no_retry_1_codes": [], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 5000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 5000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, }, "methods": { "CreateDataset": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetDataset": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListDatasets": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "UpdateDataset": { + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "DeleteDataset": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ImportData": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "ExportData": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetAnnotationSpec": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "CreateModel": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", 
- "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetModel": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", - }, - "UpdateModel": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListModels": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "DeleteModel": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "UpdateModel": { + "timeout_millis": 5000, + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "DeployModel": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "UndeployModel": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "ExportModel": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "GetModelEvaluation": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListModelEvaluations": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, }, } diff --git a/google/cloud/automl_v1/gapic/enums.py b/google/cloud/automl_v1/gapic/enums.py index 8e525587..7bb5f2cb 100644 --- a/google/cloud/automl_v1/gapic/enums.py +++ b/google/cloud/automl_v1/gapic/enums.py @@ -45,12 +45,12 @@ class TextSegmentType(enum.IntEnum): TOKEN (int): The text segment is a token. e.g. word. PARAGRAPH (int): The text segment is a paragraph. FORM_FIELD (int): The text segment is a form field. - FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be treated as - child of another FORM\_FIELD TextSegment if its span is subspan of - another TextSegment with type FORM\_FIELD. - FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will be - treated as child of another FORM\_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM\_FIELD. + FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be + treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. + FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will + be treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. TABLE (int): The text segment is a whole table, including headers, and all rows. TABLE_HEADER (int): The text segment is a table's headers. 
It will be treated as child of another TABLE TextSegment if its span is subspan of another TextSegment @@ -59,8 +59,8 @@ class TextSegmentType(enum.IntEnum): another TABLE TextSegment if its span is subspan of another TextSegment with type TABLE. TABLE_CELL (int): The text segment is a cell in table. It will be treated as child of - another TABLE\_ROW TextSegment if its span is subspan of another - TextSegment with type TABLE\_ROW. + another TABLE_ROW TextSegment if its span is subspan of another + TextSegment with type TABLE_ROW. """ TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 diff --git a/google/cloud/automl_v1/gapic/prediction_service_client.py b/google/cloud/automl_v1/gapic/prediction_service_client.py index d6df5e54..06686df3 100644 --- a/google/cloud/automl_v1/gapic/prediction_service_client.py +++ b/google/cloud/automl_v1/gapic/prediction_service_client.py @@ -34,24 +34,15 @@ from google.cloud.automl_v1.gapic import enums from google.cloud.automl_v1.gapic import prediction_service_client_config from google.cloud.automl_v1.gapic.transports import prediction_service_grpc_transport -from google.cloud.automl_v1.proto import annotation_spec_pb2 from google.cloud.automl_v1.proto import data_items_pb2 -from google.cloud.automl_v1.proto import dataset_pb2 -from google.cloud.automl_v1.proto import image_pb2 from google.cloud.automl_v1.proto import io_pb2 -from google.cloud.automl_v1.proto import model_evaluation_pb2 -from google.cloud.automl_v1.proto import model_pb2 from google.cloud.automl_v1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1.proto import prediction_service_pb2 from google.cloud.automl_v1.proto import prediction_service_pb2_grpc -from google.cloud.automl_v1.proto import service_pb2 -from google.cloud.automl_v1.proto import service_pb2_grpc from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version class PredictionServiceClient(object): @@ -59,7 +50,7 @@ class PredictionServiceClient(object): AutoML Prediction API. On any input that is documented to expect a string parameter in - snake\_case or kebab-case, either of those cases is accepted. + snake_case or kebab-case, either of those cases is accepted. """ SERVICE_ADDRESS = "automl.googleapis.com:443" @@ -186,12 +177,12 @@ def __init__( self.transport = transport else: self.transport = prediction_service_grpc_transport.PredictionServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -202,7 +193,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -222,22 +213,41 @@ def predict( metadata=None, ): """ - Perform an online prediction. The prediction result will be directly - returned in the response. 
Available for following ML problems, and their - expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 characters, - UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 - NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, UTF-8 - encoded. - - Text Sentiment - TextSnippet, content up 500 characters, UTF-8 - encoded. + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, and + their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, up to + 5MB. Not available for FORECASTING ``prediction_type``. Example: >>> from google.cloud import automl_v1 @@ -252,27 +262,37 @@ def predict( >>> response = client.predict(name, payload) Args: - name (str): Name of the model requested to serve the prediction. + name (str): Required. Name of the model requested to serve the prediction. payload (Union[dict, ~google.cloud.automl_v1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.ExamplePayload` - params (dict[str -> str]): Additional domain-specific parameters, any string must be up to 25000 - characters long. + params (dict[str -> str]): Additional domain-specific parameters, any string must be up to + 25000 characters long. + + AutoML Vision Classification - - For Image Classification: + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for an image, it will only produce results that have + at least this confidence score. The default is 0.5. - ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model - makes predictions for an image, it will only produce results that - have at least this confidence score. The default is 0.5. + AutoML Vision Object Detection - - For Image Object Detection: ``score_threshold`` - (float) When Model - detects objects on the image, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to 1 range, - default is 0.5. ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned in the response. - Default is 100, the requested value may be limited by server. 
+ ``score_threshold`` : (float) When Model detects objects on the image, + it will only produce bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of bounding + boxes returned. The default is 100. The number of returned bounding + boxes might be limited by the server. + + AutoML Tables + + ``feature_importance`` : (boolean) Whether + + ``feature_importance`` is populated in the returned list of + ``TablesAnnotation`` objects. The default is false. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -304,7 +324,7 @@ def predict( ) request = prediction_service_pb2.PredictRequest( - name=name, payload=payload, params=params + name=name, payload=payload, params=params, ) if metadata is None: metadata = [] @@ -339,11 +359,16 @@ def batch_predict( Instead, a long running operation object is returned. User can poll the operation result via ``GetOperation`` method. Once the operation is done, ``BatchPredictResult`` is returned in the ``response`` field. - Available for following ML problems: + Available for following ML scenarios: - - Image Classification - - Image Object Detection - - Text Extraction + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural Language + Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables Example: >>> from google.cloud import automl_v1 @@ -370,7 +395,7 @@ def batch_predict( >>> metadata = response.metadata() Args: - name (str): Name of the model requested to serve the batch prediction. + name (str): Required. Name of the model requested to serve the batch prediction. input_config (Union[dict, ~google.cloud.automl_v1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction. If a dict is provided, it must be of the same form as the protobuf @@ -380,29 +405,75 @@ def batch_predict( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1.types.BatchPredictOutputConfig` - params (dict[str -> str]): Additional domain-specific parameters for the predictions, any string - must be up to 25000 characters long. + params (dict[str -> str]): Additional domain-specific parameters for the predictions, any + string must be up to 25000 characters long. + + AutoML Natural Language Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for a text snippet, it will only produce results that + have at least this confidence score. The default is 0.5. + + AutoML Vision Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for an image, it will only produce results that have + at least this confidence score. The default is 0.5. + + AutoML Vision Object Detection + + ``score_threshold`` : (float) When Model detects objects on the image, + it will only produce bounding boxes which have at least this confidence + score. Value in 0 to 1 range, default is 0.5. + + ``max_bounding_box_count`` : (int64) The maximum number of bounding + boxes returned per image. The default is 100, the number of bounding + boxes returned might be limited by the server. 
AutoML Video Intelligence + Classification + + ``score_threshold`` : (float) A value from 0.0 to 1.0. When the model + makes predictions for a video, it will only produce results that have at + least this confidence score. The default is 0.5. + + ``segment_classification`` : (boolean) Set to true to request + segment-level classification. AutoML Video Intelligence returns labels + and their confidence scores for the entire segment of the video that + user specified in the request configuration. The default is true. + + ``shot_classification`` : (boolean) Set to true to request shot-level + classification. AutoML Video Intelligence determines the boundaries for + each camera shot in the entire segment of the video that user specified + in the request configuration. AutoML Video Intelligence then returns + labels and their confidence scores for each detected shot, along with + the start and end time of the shot. The default is false. + + WARNING: Model evaluation is not done for this classification type, the + quality of it depends on training data, but there are no metrics + provided to describe that quality. - - For Text Classification: + ``1s_interval_classification`` : (boolean) Set to true to request + classification for a video at one-second intervals. AutoML Video + Intelligence returns labels and their confidence scores for each second + of the entire segment of the video that user specified in the request + configuration. The default is false. - ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model - makes predictions for a text snippet, it will only produce results - that have at least this confidence score. The default is 0.5. + WARNING: Model evaluation is not done for this classification type, the + quality of it depends on training data, but there are no metrics + provided to describe that quality. - - For Image Classification: + AutoML Video Intelligence Object Tracking - ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model - makes predictions for an image, it will only produce results that - have at least this confidence score. The default is 0.5. + ``score_threshold`` : (float) When Model detects objects on video + frames, it will only produce bounding boxes which have at least this + confidence score. Value in 0 to 1 range, default is 0.5. - - For Image Object Detection: + ``max_bounding_box_count`` : (int64) The maximum number of bounding + boxes returned per image. The default is 100, the number of bounding + boxes returned might be limited by the server. - ``score_threshold`` - (float) When Model detects objects on the - image, it will only produce bounding boxes which have at least this - confidence score. Value in 0 to 1 range, default is 0.5. - ``max_bounding_box_count`` - (int64) No more than this number of - bounding boxes will be produced per image. Default is 100, the - requested value may be limited by server. + ``min_bounding_box_size`` : (float) Only bounding boxes with shortest + edge at least that long as a relative value of video frame size are + returned. Value in 0 to 1 range. Default is 0. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. 
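As a rough illustration of how the ``params``, ``retry``, and ``timeout`` arguments documented above fit together (a sketch only: the project, bucket, and model paths are placeholders, and the threshold and delay values are illustrative, not defaults taken from this patch):

    from google.api_core import exceptions, retry
    from google.cloud import automl_v1

    client = automl_v1.PredictionServiceClient()
    model_name = "projects/[PROJECT]/locations/us-central1/models/[MODEL_ID]"  # placeholder

    # Request/response locations on Cloud Storage (placeholder bucket).
    input_config = {"gcs_source": {"input_uris": ["gs://[BUCKET]/batch_request.csv"]}}
    output_config = {"gcs_destination": {"output_uri_prefix": "gs://[BUCKET]/results/"}}

    # Explicit per-call retry override; when omitted, the defaults come from the
    # *_client_config.py entries updated in this patch.
    custom_retry = retry.Retry(
        predicate=retry.if_exception_type(
            exceptions.DeadlineExceeded, exceptions.ServiceUnavailable
        ),
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        deadline=600.0,
    )

    operation = client.batch_predict(
        model_name,
        input_config,
        output_config,
        params={"score_threshold": "0.8"},  # string values, per the docs above
        retry=custom_retry,
        timeout=600.0,
    )
    print(operation.result())  # BatchPredictResult once the long-running call finishes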
diff --git a/google/cloud/automl_v1/gapic/prediction_service_client_config.py b/google/cloud/automl_v1/gapic/prediction_service_client_config.py index bcfb22ff..e4b1a44f 100644 --- a/google/cloud/automl_v1/gapic/prediction_service_client_config.py +++ b/google/cloud/automl_v1/gapic/prediction_service_client_config.py @@ -1,31 +1,37 @@ config = { "interfaces": { "google.cloud.automl.v1.PredictionService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, + "retry_codes": {"no_retry_2_codes": [], "no_retry_codes": []}, "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 20000, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_2_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 60000, + "total_timeout_millis": 60000, + }, }, "methods": { "Predict": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "BatchPredict": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, }, } diff --git a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py index c5f6bfa7..6ebffac5 100644 --- a/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py +++ b/google/cloud/automl_v1/gapic/transports/auto_ml_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -72,7 +72,9 @@ def __init__( # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. - self._stubs = {"auto_ml_stub": service_pb2_grpc.AutoMlStub(channel)} + self._stubs = { + "auto_ml_stub": service_pb2_grpc.AutoMlStub(channel), + } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), @@ -127,50 +129,50 @@ def create_dataset(self): return self._stubs["auto_ml_stub"].CreateDataset @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. + def get_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. - Updates a dataset. + Gets a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateDataset + return self._stubs["auto_ml_stub"].GetDataset @property - def get_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. 
+ def list_datasets(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. - Gets a dataset. + Lists datasets in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetDataset + return self._stubs["auto_ml_stub"].ListDatasets @property - def list_datasets(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. + def update_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - Lists datasets in a project. + Updates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListDatasets + return self._stubs["auto_ml_stub"].UpdateDataset @property def delete_dataset(self): """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Returns: @@ -184,7 +186,14 @@ def delete_dataset(self): def import_data(self): """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - Imports data into a dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. + + For Tables: + + - A ``schema_inference_version`` parameter must be explicitly set. + Returns an empty response in the ``response`` field when it + completes. Returns: Callable: A callable which accepts the appropriate @@ -197,8 +206,8 @@ def import_data(self): def export_data(self): """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate @@ -250,59 +259,60 @@ def get_model(self): return self._stubs["auto_ml_stub"].GetModel @property - def update_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_model`. + def list_models(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_models`. - Updates a model. + Lists models. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateModel + return self._stubs["auto_ml_stub"].ListModels @property - def list_models(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_models`. + def delete_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. - Lists models. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListModels + return self._stubs["auto_ml_stub"].DeleteModel @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + def update_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_model`. 
- Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a model. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeleteModel + return self._stubs["auto_ml_stub"].UpdateModel @property def deploy_model(self): """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing ``node_number``) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. Returns an empty response in the ``response`` field when it completes. @@ -320,8 +330,8 @@ def undeploy_model(self): Undeploys a model. If the model is not deployed this method has no effect. - Only applicable for Text Classification, Image Object Detection; all - other domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. Returns an empty response in the ``response`` field when it completes. @@ -336,9 +346,9 @@ def undeploy_model(self): def export_model(self): """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in ``ModelExportOutputConfig``. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in ``ModelExportOutputConfig``. Returns an empty response in the ``response`` field when it completes. diff --git a/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py b/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py index 9d494540..c94538be 100644 --- a/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py +++ b/google/cloud/automl_v1/gapic/transports/prediction_service_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -75,7 +75,7 @@ def __init__( self._stubs = { "prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub( channel - ) + ), } # Because this API includes a method that returns a @@ -121,22 +121,41 @@ def channel(self): def predict(self): """Return the gRPC stub for :meth:`PredictionServiceClient.predict`. - Perform an online prediction. The prediction result will be directly - returned in the response. 
Available for following ML problems, and their - expected request payloads: - - - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. - - Text Classification - TextSnippet, content up to 60,000 characters, - UTF-8 encoded. - - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 - NFC encoded. - - Translation - TextSnippet, content up to 25,000 characters, UTF-8 - encoded. - - Text Sentiment - TextSnippet, content up 500 characters, UTF-8 - encoded. + Perform an online prediction. The prediction result is directly + returned in the response. Available for following ML scenarios, and + their expected request payloads: + + AutoML Vision Classification + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Vision Object Detection + + - An image in .JPEG, .GIF or .PNG format, image_bytes up to 30MB. + + AutoML Natural Language Classification + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Natural Language Entity Extraction + + - A TextSnippet up to 10,000 characters, UTF-8 NFC encoded or a + document in .PDF, .TIF or .TIFF format with size upto 20MB. + + AutoML Natural Language Sentiment Analysis + + - A TextSnippet up to 60,000 characters, UTF-8 encoded or a document in + .PDF, .TIF or .TIFF format with size upto 2MB. + + AutoML Translation + + - A TextSnippet up to 25,000 characters, UTF-8 encoded. + + AutoML Tables + + - A row with column values matching the columns of the model, up to + 5MB. Not available for FORECASTING ``prediction_type``. Returns: Callable: A callable which accepts the appropriate @@ -154,11 +173,16 @@ def batch_predict(self): Instead, a long running operation object is returned. User can poll the operation result via ``GetOperation`` method. Once the operation is done, ``BatchPredictResult`` is returned in the ``response`` field. - Available for following ML problems: - - - Image Classification - - Image Object Detection - - Text Extraction + Available for following ML scenarios: + + - AutoML Vision Classification + - AutoML Vision Object Detection + - AutoML Video Intelligence Classification + - AutoML Video Intelligence Object Tracking \* AutoML Natural Language + Classification + - AutoML Natural Language Entity Extraction + - AutoML Natural Language Sentiment Analysis + - AutoML Tables Returns: Callable: A callable which accepts the appropriate diff --git a/google/cloud/automl_v1/proto/annotation_payload.proto b/google/cloud/automl_v1/proto/annotation_payload.proto index 980c0e36..a81feaf1 100644 --- a/google/cloud/automl_v1/proto/annotation_payload.proto +++ b/google/cloud/automl_v1/proto/annotation_payload.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/google/cloud/automl_v1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1/proto/annotation_payload_pb2.py index 7840971f..8cdfd04f 100644 --- a/google/cloud/automl_v1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1/proto/annotation_payload_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1/proto/annotation_payload.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -38,12 +35,9 @@ name="google/cloud/automl_v1/proto/annotation_payload.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z // // +//

AutoML Video Intelligence
+//
+//
+// Classification
+// +// See [Preparing your training +// data](https://cloud.google.com/video-intelligence/automl/docs/prepare) for +// more information. +// +// CSV file(s) with each line in format: +// +// ML_USE,GCS_FILE_PATH +// +// For `ML_USE`, do not use `VALIDATE`. +// +// `GCS_FILE_PATH` is the path to another .csv file that describes training +// example for a given `ML_USE`, using the following row format: +// +// GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) +// +// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up +// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// +// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the +// length of the video, and the end time must be after the start time. Any +// segment of a video which has one or more labels on it, is considered a +// hard negative for all other labels. Any segment with no labels on +// it is considered to be unknown. If a whole video is unknown, then +// it should be mentioned just once with ",," in place of `LABEL, +// TIME_SEGMENT_START,TIME_SEGMENT_END`. +// +// Sample top level CSV file: +// +// TRAIN,gs://folder/train_videos.csv +// TEST,gs://folder/test_videos.csv +// UNASSIGNED,gs://folder/other_videos.csv +// +// Sample rows of a CSV file for a particular ML_USE: +// +// gs://folder/video1.avi,car,120,180.000021 +// gs://folder/video1.avi,bike,150,180.000021 +// gs://folder/vid2.avi,car,0,60.5 +// gs://folder/vid3.avi,,, +// +// +// +//
Object Tracking
+// +// See [Preparing your training +// data](/video-intelligence/automl/object-tracking/docs/prepare) for more +// information. +// +// CSV file(s) with each line in format: +// +// ML_USE,GCS_FILE_PATH +// +// For `ML_USE`, do not use `VALIDATE`. +// +// `GCS_FILE_PATH` is the path to another .csv file that describes training +// example for a given `ML_USE`, using the following row format: +// +// GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX +// +// or +// +// GCS_FILE_PATH,,,,,,,,,, +// +// Here `GCS_FILE_PATH` leads to a video of up to 50GB in size and up +// to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// Providing `INSTANCE_ID`s can help to obtain a better model. When +// a specific labeled entity leaves the video frame, and shows up +// afterwards it is not required, albeit preferable, that the same +// `INSTANCE_ID` is given to it. +// +// `TIMESTAMP` must be within the length of the video, the +// `BOUNDING_BOX` is assumed to be drawn on the closest video's frame +// to the `TIMESTAMP`. Any mentioned by the `TIMESTAMP` frame is expected +// to be exhaustively labeled and no more than 500 `BOUNDING_BOX`-es per +// frame are allowed. If a whole video is unknown, then it should be +// mentioned just once with ",,,,,,,,,," in place of `LABEL, +// [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX`. +// +// Sample top level CSV file: +// +// TRAIN,gs://folder/train_videos.csv +// TEST,gs://folder/test_videos.csv +// UNASSIGNED,gs://folder/other_videos.csv +// +// Seven sample rows of a CSV file for a particular ML_USE: +// +// gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 +// gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 +// gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 +// gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, +// gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, +// gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, +// gs://folder/video2.avi,,,,,,,,,,, +//
+//
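A minimal sketch of producing the two-level CSV layout described above for the video Classification case (local file names are placeholders and the gs:// paths reuse the documentation's own samples):

    import csv

    # Per-ML_USE CSV: GCS_FILE_PATH,LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END per row.
    with open("train_videos.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["gs://folder/video1.avi", "car", "120", "180.000021"])
        writer.writerow(["gs://folder/video1.avi", "bike", "150", "180.000021"])
        writer.writerow(["gs://folder/vid3.avi", "", "", ""])  # whole video unknown -> ",,"

    # Top-level CSV: ML_USE,GCS_FILE_PATH (VALIDATE is not used here).
    with open("videos.csv", "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["TRAIN", "gs://folder/train_videos.csv"])
        writer.writerow(["TEST", "gs://folder/test_videos.csv"])
        writer.writerow(["UNASSIGNED", "gs://folder/other_videos.csv"])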
+// +//
// AutoML Natural Language
// // @@ -223,9 +322,11 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // **JSONL files that reference documents** // // .JSONL files contain, per line, a JSON document that wraps a -// `input_config` that contains the path to a source PDF document. +// `input_config` that contains the path to a source document. // Multiple JSON documents can be separated using line breaks (\n). // +// Supported document extensions: .PDF, .TIF, .TIFF +// // For example: // // { @@ -239,19 +340,19 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // { // "document": { // "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] +// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] // } // } // } // } // -// **In-line JSONL files with PDF layout information** +// **In-line JSONL files with document layout information** // -// **Note:** You can only annotate PDF files using the UI. The format described -// below applies to annotated PDF files exported using the UI or `exportData`. +// **Note:** You can only annotate documents using the UI. The format described +// below applies to annotated documents exported using the UI or `exportData`. // -// In-line .JSONL files for PDF documents contain, per line, a JSON document -// that wraps a `document` field that provides the textual content of the PDF +// In-line .JSONL files for documents contain, per line, a JSON document +// that wraps a `document` field that provides the textual content of the // document and the layout information. // // For example: @@ -342,8 +443,9 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // 10MB or less in size. // // For the `MULTICLASS` classification type, at most one `LABEL` is allowed. +// // The `ML_USE` and `LABEL` columns are optional. -// Supported file extensions: .TXT, .PDF, .ZIP +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP // // A maximum of 100 unique labels are allowed per CSV row. // @@ -388,7 +490,7 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // 128kB or less in size. // // The `ML_USE` and `SENTIMENT` columns are optional. -// Supported file extensions: .TXT, .PDF, .ZIP +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP // // * `SENTIMENT` - An integer between 0 and // Dataset.text_sentiment_dataset_metadata.sentiment_max @@ -417,6 +519,54 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // // // +// +//

AutoML Tables
+// +// See [Preparing your training +// data](https://cloud.google.com/automl-tables/docs/prepare) for more +// information. +// +// You can use either +// [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or +// [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. +// All input is concatenated into a +// single +// +// [primary_table_spec_id][google.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id] +// +// **For gcs_source:** +// +// CSV file(s), where the first row of the first file is the header, +// containing unique column names. If the first row of a subsequent +// file is the same as the header, then it is also treated as a +// header. All other rows contain values for the corresponding +// columns. +// +// Each .CSV file by itself must be 10GB or smaller, and their total +// size must be 100GB or smaller. +// +// First three sample rows of a CSV file: +//
+// "Id","First Name","Last Name","Dob","Addresses"
+//
+// "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+//
+// "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+// 
+// **For bigquery_source:** +// +// An URI of a BigQuery table. The user data size of the BigQuery +// table must be 100GB or smaller. +// +// An imported table must have between 2 and 1,000 columns, inclusive, +// and between 1000 and 100,000,000 rows, inclusive. There are at most 5 +// import data running in parallel. +// +//
+//
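A minimal sketch, under the CSV layout above, of starting such an import through the client (the dataset path and bucket are placeholders; ``schema_inference_version`` is the Tables-only parameter described under ``params`` just below and in the ImportData notes earlier in this patch):

    from google.cloud import automl_v1

    client = automl_v1.AutoMlClient()
    dataset_name = "projects/[PROJECT]/locations/us-central1/datasets/[DATASET_ID]"  # placeholder

    input_config = {
        "gcs_source": {"input_uris": ["gs://[BUCKET]/tables_1.csv"]},  # placeholder bucket
        "params": {"schema_inference_version": "1"},  # required for Tables imports
    }

    operation = client.import_data(dataset_name, input_config)
    operation.result()  # empty response once the long-running import completes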
+// +// // **Input field definitions:** // // `ML_USE` @@ -435,6 +585,11 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // For each label an AnnotationSpec is created which display_name // becomes the label; AnnotationSpecs are given back in predictions. // +// `INSTANCE_ID` +// : A positive integer that identifies a specific instance of a +// labeled entity on an example. Used e.g. to track two cars on +// a video while being able to tell apart which one is which. +// // `BOUNDING_BOX` // : (`VERTEX,VERTEX,VERTEX,VERTEX` | `VERTEX,,,VERTEX,,`) // A rectangle parallel to the frame of the example (image, @@ -452,6 +607,23 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // leading non-decimal 0 can be omitted (i.e. 0.3 = .3). // Point 0,0 is in top left. // +// `TIME_SEGMENT_START` +// : (`TIME_OFFSET`) +// Expresses a beginning, inclusive, of a time segment +// within an example that has a time dimension +// (e.g. video). +// +// `TIME_SEGMENT_END` +// : (`TIME_OFFSET`) +// Expresses an end, exclusive, of a time segment within +// n example that has a time dimension (e.g. video). +// +// `TIME_OFFSET` +// : A number of seconds as measured from the start of an +// example (e.g. video). Fractions are allowed, up to a +// microsecond precision. "inf" is allowed, and it means the end +// of the example. +// // `TEXT_SNIPPET` // : The content of a text snippet, UTF-8 encoded, enclosed within // double quotes (""). @@ -473,15 +645,22 @@ message InputConfig { // The source of the input. oneof source { // The Google Cloud Storage location for the input content. - // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], - // `gcs_source` points to a CSV file with a structure described in - // [InputConfig][google.cloud.automl.v1.InputConfig]. + // For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], `gcs_source` points to a CSV file with + // a structure described in [InputConfig][google.cloud.automl.v1.InputConfig]. GcsSource gcs_source = 1; } // Additional domain-specific parameters describing the semantic of the // imported data, any string must be up to 25000 // characters long. + // + //

AutoML Tables
+ // + // `schema_inference_version` + // : (integer) This value must be supplied. + // The version of the + // algorithm to use for the initial inference of the + // column data types of the imported table. Allowed values: "1". map params = 2; } @@ -496,6 +675,82 @@ message InputConfig { // non-terminal symbols defined near the end of this comment. The formats // are: // +//

AutoML Vision
+//
Classification
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH +// +// The Google Cloud Storage location of an image of up to +// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. +// This path is treated as the ID in the batch predict output. +// +// Sample rows: +// +// gs://folder/image1.jpeg +// gs://folder/image2.gif +// gs://folder/image3.png +// +//
Object Detection
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH +// +// The Google Cloud Storage location of an image of up to +// 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. +// This path is treated as the ID in the batch predict output. +// +// Sample rows: +// +// gs://folder/image1.jpeg +// gs://folder/image2.gif +// gs://folder/image3.png +//
+//
+// +//
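For completeness, a small sketch of the one-column request CSV described above; the local file name and bucket are placeholders, and the image paths reuse the sample rows:

    import csv

    # One GCS_FILE_PATH per line; each image's path doubles as its ID in the batch output.
    with open("batch_request.csv", "w", newline="") as f:
        writer = csv.writer(f)
        for path in [
            "gs://folder/image1.jpeg",
            "gs://folder/image2.gif",
            "gs://folder/image3.png",
        ]:
            writer.writerow([path])

    # After uploading the CSV to Cloud Storage, reference it in the batch input config:
    input_config = {"gcs_source": {"input_uris": ["gs://[BUCKET]/batch_request.csv"]}}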

AutoML Video Intelligence
+//
Classification
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END +// +// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in +// size and up to 3h in duration duration. +// Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// +// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the +// length of the video, and the end time must be after the start time. +// +// Sample rows: +// +// gs://folder/video1.mp4,10,40 +// gs://folder/video1.mp4,20,60 +// gs://folder/vid2.mov,0,inf +// +//
Object Tracking
+// +// One or more CSV files where each line is a single column: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END +// +// `GCS_FILE_PATH` is the Google Cloud Storage location of video up to 50GB in +// size and up to 3h in duration duration. +// Supported extensions: .MOV, .MPEG4, .MP4, .AVI. +// +// `TIME_SEGMENT_START` and `TIME_SEGMENT_END` must be within the +// length of the video, and the end time must be after the start time. +// +// Sample rows: +// +// gs://folder/video1.mp4,10,40 +// gs://folder/video1.mp4,20,60 +// gs://folder/vid2.mov,0,inf +//
+//
+// //

AutoML Natural Language
//
Classification
// @@ -504,13 +759,15 @@ message InputConfig { // GCS_FILE_PATH // // `GCS_FILE_PATH` is the Google Cloud Storage location of a text file. -// Supported file extensions: .TXT, .PDF +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF +// // Text files can be no larger than 10MB in size. // // Sample rows: // // gs://folder/text1.txt // gs://folder/text2.pdf +// gs://folder/text3.tif // //
Sentiment Analysis
// One or more CSV files where each line is a single column: @@ -518,13 +775,15 @@ message InputConfig { // GCS_FILE_PATH // // `GCS_FILE_PATH` is the Google Cloud Storage location of a text file. -// Supported file extensions: .TXT, .PDF +// Supported file extensions: .TXT, .PDF, .TIF, .TIFF +// // Text files can be no larger than 128kB in size. // // Sample rows: // // gs://folder/text1.txt // gs://folder/text2.pdf +// gs://folder/text3.tif // //
Entity Extraction
// @@ -540,9 +799,10 @@ message InputConfig { // be UTF-8 NFC encoded (ASCII already is). The IDs provided should be // unique. // -// Each document JSONL file contains, per line, a proto that wraps a -// Document proto with `input_config` set. Only PDF documents are -// currently supported, and each PDF document cannot exceed 2MB in size. +// Each document JSONL file contains, per line, a proto that wraps a Document +// proto with `input_config` set. Each document cannot exceed 2MB in size. +// +// Supported document extensions: .PDF, .TIF, .TIFF // // Each JSONL file must not exceed 100MB in size, and no more than 20 // JSONL files may be passed. @@ -590,7 +850,7 @@ message InputConfig { // { // "document": { // "input_config": { -// "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] +// "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] // } // } // } @@ -598,12 +858,83 @@ message InputConfig { //
//
// +//
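A quick sketch of writing the in-line JSONL request file for batch entity extraction as described above (the ids, snippet text, and output file name are made-up examples):

    import json

    # One JSON object per line: a unique caller-chosen id plus the text snippet to annotate.
    rows = [
        {"id": "first_id", "text_snippet": {"content": "dog car cat"}},
        {"id": "second_id", "text_snippet": {"content": "horse tiger"}},
    ]
    with open("requests.jsonl", "w") as f:
        for row in rows:
            f.write(json.dumps(row) + "\n")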

AutoML Tables
+// +// See [Preparing your training +// data](https://cloud.google.com/automl-tables/docs/predict-batch) for more +// information. +// +// You can use either +// [gcs_source][google.cloud.automl.v1.BatchPredictInputConfig.gcs_source] +// or +// [bigquery_source][BatchPredictInputConfig.bigquery_source]. +// +// **For gcs_source:** +// +// CSV file(s), each by itself 10GB or smaller and total size must be +// 100GB or smaller, where first file must have a header containing +// column names. If the first row of a subsequent file is the same as +// the header, then it is also treated as a header. All other rows +// contain values for the corresponding columns. +// +// The column names must contain the model's +// +// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] +// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] +// (order doesn't matter). The columns corresponding to the model's +// input feature column specs must contain values compatible with the +// column spec's data types. Prediction on all the rows, i.e. the CSV +// lines, will be attempted. +// +// +// Sample rows from a CSV file: +//
+// "First Name","Last Name","Dob","Addresses"
+//
+// "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
+//
+// "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+// 
+// **For bigquery_source:** +// +// The URI of a BigQuery table. The user data size of the BigQuery +// table must be 100GB or smaller. +// +// The column names must contain the model's +// +// [input_feature_column_specs'][google.cloud.automl.v1.TablesModelMetadata.input_feature_column_specs] +// [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] +// (order doesn't matter). The columns corresponding to the model's +// input feature column specs must contain values compatible with the +// column spec's data types. Prediction on all the rows of the table +// will be attempted. +//
+//
+// // **Input field definitions:** // // `GCS_FILE_PATH` // : The path to a file on Google Cloud Storage. For example, // "gs://folder/video.avi". // +// `TIME_SEGMENT_START` +// : (`TIME_OFFSET`) +// Expresses a beginning, inclusive, of a time segment +// within an example that has a time dimension +// (e.g. video). +// +// `TIME_SEGMENT_END` +// : (`TIME_OFFSET`) +// Expresses an end, exclusive, of a time segment within +// n example that has a time dimension (e.g. video). +// +// `TIME_OFFSET` +// : A number of seconds as measured from the start of an +// example (e.g. video). Fractions are allowed, up to a +// microsecond precision. "inf" is allowed, and it means the end +// of the example. +// // **Errors:** // // If any of the provided CSV files can't be parsed or if more than certain @@ -630,82 +961,43 @@ message DocumentInputConfig { GcsSource gcs_source = 1; } -// Output configuration for ExportData. -// -// As destination the -// [gcs_destination][google.cloud.automl.v1.OutputConfig.gcs_destination] -// must be set unless specified otherwise for a domain. If gcs_destination is -// set then in the given directory a new directory is created. Its name -// will be "export_data--", -// where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. -// Only ground truth annotations are exported (not approved annotations are -// not exported). -// -// The outputs correspond to how the data was imported, and may be used as -// input to import data. The output formats are represented as EBNF with literal -// commas and same non-terminal symbols definitions are these in import data's -// [InputConfig][google.cloud.automl.v1.InputConfig]: -// -// * For Image Classification: -// CSV file(s) `image_classification_1.csv`, -// `image_classification_2.csv`,...,`image_classification_N.csv`with -// each line in format: -// ML_USE,GCS_FILE_PATH,LABEL,LABEL,... -// where GCS_FILE_PATHs point at the original, source locations of the -// imported images. -// For MULTICLASS classification type, there can be at most one LABEL -// per example. -// -// * For Image Object Detection: -// CSV file(s) `image_object_detection_1.csv`, -// `image_object_detection_2.csv`,...,`image_object_detection_N.csv` -// with each line in format: -// ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) -// where GCS_FILE_PATHs point at the original, source locations of the -// imported images. -// -// * For Text Classification: -// In the created directory CSV file(s) `text_classification_1.csv`, -// `text_classification_2.csv`, ...,`text_classification_N.csv` will be -// created where N depends on the total number of examples exported. -// Each line in the CSV is of the format: -// ML_USE,GCS_FILE_PATH,LABEL,LABEL,... -// where GCS_FILE_PATHs point at the exported .txt files containing -// the text content of the imported example. For MULTICLASS -// classification type, there will be at most one LABEL per example. -// -// * For Text Sentiment: -// In the created directory CSV file(s) `text_sentiment_1.csv`, -// `text_sentiment_2.csv`, ...,`text_sentiment_N.csv` will be -// created where N depends on the total number of examples exported. -// Each line in the CSV is of the format: -// ML_USE,GCS_FILE_PATH,SENTIMENT -// where GCS_FILE_PATHs point at the exported .txt files containing -// the text content of the imported example. -// -// * For Text Extraction: -// CSV file `text_extraction.csv`, with each line in format: -// ML_USE,GCS_FILE_PATH -// GCS_FILE_PATH leads to a .JSONL (i.e. 
JSON Lines) file which -// contains, per line, a proto that wraps a TextSnippet proto (in json -// representation) followed by AnnotationPayload protos (called -// annotations). If initially documents had been imported, the JSONL -// will point at the original, source locations of the imported -// documents. -// -// * For Translation: +// * For Translation: // CSV file `translation.csv`, with each line in format: // ML_USE,GCS_FILE_PATH // GCS_FILE_PATH leads to a .TSV file which describes examples that have // given ML_USE, using the following row format per line: // TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target // language) +// +// * For Tables: +// Output depends on whether the dataset was imported from Google Cloud +// Storage or BigQuery. +// Google Cloud Storage case: +// +// [gcs_destination][google.cloud.automl.v1p1beta.OutputConfig.gcs_destination] +// must be set. Exported are CSV file(s) `tables_1.csv`, +// `tables_2.csv`,...,`tables_N.csv` with each having as header line +// the table's column names, and all other lines contain values for +// the header columns. +// BigQuery case: +// +// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] +// pointing to a BigQuery project must be set. In the given project a +// new dataset will be created with name +// +// `export_data__` +// where will be made +// BigQuery-dataset-name compatible (e.g. most special characters will +// become underscores), and timestamp will be in +// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that +// dataset a new table called `primary_table` will be created, and +// filled with precisely the same data as this obtained on import. message OutputConfig { // The destination of the output. oneof destination { - // Required. The Google Cloud Storage location where the output is to be - // written to. For Image Object Detection, Text Extraction in the given - // directory a new directory will be created with name: + // Required. The Google Cloud Storage location where the output is to be written to. + // For Image Object Detection, Text Extraction, Video Classification and + // Tables, in the given directory a new directory will be created with name: // export_data-- where // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export // output will be written into that directory. @@ -725,6 +1017,101 @@ message OutputConfig { // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents // of it depends on the ML problem the predictions are made for. // +// * For Image Classification: +// In the created directory files `image_classification_1.jsonl`, +// `image_classification_2.jsonl`,...,`image_classification_N.jsonl` +// will be created, where N may be 1, and depends on the +// total number of the successfully predicted images and annotations. +// A single image will be listed only once with all its annotations, +// and its annotations will never be split across files. +// Each .JSONL file will contain, per line, a JSON representation of a +// proto that wraps image's "ID" : "" followed by a list of +// zero or more AnnotationPayload protos (called annotations), which +// have classification detail populated. +// If prediction for any image failed (partially or completely), then an +// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` +// files will be created (N depends on total number of failed +// predictions). 
These files will have a JSON representation of a proto +// that wraps the same "ID" : "" but here followed by +// exactly one +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// containing only `code` and `message`fields. +// +// * For Image Object Detection: +// In the created directory files `image_object_detection_1.jsonl`, +// `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl` +// will be created, where N may be 1, and depends on the +// total number of the successfully predicted images and annotations. +// Each .JSONL file will contain, per line, a JSON representation of a +// proto that wraps image's "ID" : "" followed by a list of +// zero or more AnnotationPayload protos (called annotations), which +// have image_object_detection detail populated. A single image will +// be listed only once with all its annotations, and its annotations +// will never be split across files. +// If prediction for any image failed (partially or completely), then +// additional `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` +// files will be created (N depends on total number of failed +// predictions). These files will have a JSON representation of a proto +// that wraps the same "ID" : "" but here followed by +// exactly one +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// containing only `code` and `message`fields. +// * For Video Classification: +// In the created directory a video_classification.csv file, and a .JSON +// file per each video classification requested in the input (i.e. each +// line in given CSV(s)), will be created. +// +// The format of video_classification.csv is: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS +// where: +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 +// the prediction input lines (i.e. video_classification.csv has +// precisely the same number of lines as the prediction input had.) +// JSON_FILE_NAME = Name of .JSON file in the output directory, which +// contains prediction responses for the video time segment. +// STATUS = "OK" if prediction completed successfully, or an error code +// with message otherwise. If STATUS is not "OK" then the .JSON file +// for that line may not exist or be empty. +// +// Each .JSON file, assuming STATUS is "OK", will contain a list of +// AnnotationPayload protos in JSON format, which are the predictions +// for the video time segment the file is assigned to in the +// video_classification.csv. All AnnotationPayload protos will have +// video_classification field set, and will be sorted by +// video_classification.type field (note that the returned types are +// governed by `classifaction_types` parameter in +// [PredictService.BatchPredictRequest.params][]). +// +// * For Video Object Tracking: +// In the created directory a video_object_tracking.csv file will be +// created, and multiple files video_object_trackinng_1.json, +// video_object_trackinng_2.json,..., video_object_trackinng_N.json, +// where N is the number of requests in the input (i.e. the number of +// lines in given CSV(s)). +// +// The format of video_object_tracking.csv is: +// +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS +// where: +// GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 +// the prediction input lines (i.e. 
video_object_tracking.csv has +// precisely the same number of lines as the prediction input had.) +// JSON_FILE_NAME = Name of .JSON file in the output directory, which +// contains prediction responses for the video time segment. +// STATUS = "OK" if prediction completed successfully, or an error +// code with message otherwise. If STATUS is not "OK" then the .JSON +// file for that line may not exist or be empty. +// +// Each .JSON file, assuming STATUS is "OK", will contain a list of +// AnnotationPayload protos in JSON format, which are the predictions +// for each frame of the video time segment the file is assigned to in +// video_object_tracking.csv. All AnnotationPayload protos will have +// video_object_tracking field set. // * For Text Classification: // In the created directory files `text_classification_1.jsonl`, // `text_classification_2.jsonl`,...,`text_classification_N.jsonl` @@ -732,18 +1119,18 @@ message OutputConfig { // total number of inputs and annotations found. // // Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps input text (or pdf) file in +// proto that wraps input text file (or document) in // the text snippet (or document) proto and a list of // zero or more AnnotationPayload protos (called annotations), which -// have classification detail populated. A single text (or pdf) file -// will be listed only once with all its annotations, and its +// have classification detail populated. A single text file (or +// document) will be listed only once with all its annotations, and its // annotations will never be split across files. // -// If prediction for any text (or pdf) file failed (partially or +// If prediction for any input file (or document) failed (partially or // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., // `errors_N.jsonl` files will be created (N depends on total number of // failed predictions). These files will have a JSON representation of a -// proto that wraps input text (or pdf) file followed by exactly one +// proto that wraps input file followed by exactly one // // [`google.rpc.Status`](https: // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) @@ -756,18 +1143,18 @@ message OutputConfig { // total number of inputs and annotations found. // // Each .JSONL file will contain, per line, a JSON representation of a -// proto that wraps input text (or pdf) file in +// proto that wraps input text file (or document) in // the text snippet (or document) proto and a list of // zero or more AnnotationPayload protos (called annotations), which -// have text_sentiment detail populated. A single text (or pdf) file -// will be listed only once with all its annotations, and its +// have text_sentiment detail populated. A single text file (or +// document) will be listed only once with all its annotations, and its // annotations will never be split across files. // -// If prediction for any text (or pdf) file failed (partially or +// If prediction for any input file (or document) failed (partially or // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., // `errors_N.jsonl` files will be created (N depends on total number of // failed predictions). 
These files will have a JSON representation of a -// proto that wraps input text (or pdf) file followed by exactly one +// proto that wraps input file followed by exactly one // // [`google.rpc.Status`](https: // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) @@ -803,14 +1190,108 @@ message OutputConfig { // failed predictions). These files will have a JSON representation of a // proto that wraps either the "id" : "" (in case of inline) // or the document proto (in case of document) but here followed by -// exactly one [`google.rpc.Status`](https: +// exactly one +// +// [`google.rpc.Status`](https: // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) // containing only `code` and `message`. +// +// * For Tables: +// Output depends on whether +// +// [gcs_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destination] +// or +// +// [bigquery_destination][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.bigquery_destination] +// is set (either is allowed). +// Google Cloud Storage case: +// In the created directory files `tables_1.csv`, `tables_2.csv`,..., +// `tables_N.csv` will be created, where N may be 1, and depends on +// the total number of the successfully predicted rows. +// For all CLASSIFICATION +// +// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: +// Each .csv file will contain a header, listing all columns' +// +// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] +// given on input followed by M target column names in the format of +// +// "<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>__score" where M is the number of distinct target values, +// i.e. number of distinct values in the target column of the table +// used to train the model. Subsequent lines will contain the +// respective values of successfully predicted rows, with the last, +// i.e. the target, columns having the corresponding prediction +// [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. +// For REGRESSION and FORECASTING +// +// [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: +// Each .csv file will contain a header, listing all columns' +// [display_name-s][google.cloud.automl.v1p1beta.display_name] +// given on input followed by the predicted target column with name +// in the format of +// +// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" +// Subsequent lines will contain the respective values of +// successfully predicted rows, with the last, i.e. the target, +// column having the predicted target value. +// If prediction for any rows failed, then an additional +// `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be +// created (N depends on total number of failed rows). These files +// will have analogous format as `tables_*.csv`, but always with a +// single target column having +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// represented as a JSON string, and containing only `code` and +// `message`. +// BigQuery case: +// +// [bigquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery_destination] +// pointing to a BigQuery project must be set. 
In the given project a +// new dataset will be created with name +// `prediction__` +// where will be made +// BigQuery-dataset-name compatible (e.g. most special characters will +// become underscores), and timestamp will be in +// YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset +// two tables will be created, `predictions`, and `errors`. +// The `predictions` table's column names will be the input columns' +// +// [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] +// followed by the target column with name in the format of +// +// "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" +// The input feature columns will contain the respective values of +// successfully predicted rows, with the target column having an +// ARRAY of +// +// [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], +// represented as STRUCT-s, containing +// [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. +// The `errors` table contains rows for which the prediction has +// failed, it has analogous input columns while the target column name +// is in the format of +// +// "errors_<[target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata.target_column_spec] +// +// [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>", +// and as a value has +// +// [`google.rpc.Status`](https: +// //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto) +// represented as a STRUCT, and containing only `code` and `message`. message BatchPredictOutputConfig { // The destination of the output. oneof destination { - // Required. The Google Cloud Storage location of the directory where the - // output is to be written to. + // Required. The Google Cloud Storage location of the directory where the output is to + // be written to. GcsDestination gcs_destination = 1 [(google.api.field_behavior) = REQUIRED]; } } @@ -819,9 +1300,8 @@ message BatchPredictOutputConfig { message ModelExportOutputConfig { // The destination of the output. oneof destination { - // Required. The Google Cloud Storage location where the model is to be - // written to. This location may only be set for the following model - // formats: + // Required. The Google Cloud Storage location where the model is to be written to. + // This location may only be set for the following model formats: // "tflite", "edgetpu_tflite", "tf_saved_model", "tf_js", "core_ml". // // Under the directory given as the destination a new one with name @@ -839,7 +1319,8 @@ message ModelExportOutputConfig { // // * For Image Classification mobile-low-latency-1, mobile-versatile-1, // mobile-high-accuracy-1: - // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js". + // "tflite" (default), "edgetpu_tflite", "tf_saved_model", "tf_js", + // "docker". // // * For Image Classification mobile-core-ml-low-latency-1, // mobile-core-ml-versatile-1, mobile-core-ml-high-accuracy-1: @@ -855,13 +1336,24 @@ message ModelExportOutputConfig { // devices. // * tf_saved_model - A tensorflow model in SavedModel format. // * tf_js - A [TensorFlow.js](https://www.tensorflow.org/js) model that can - // be used in the browser and in Node.js using JavaScript.x` + // be used in the browser and in Node.js using JavaScript. + // * docker - Used for Docker containers. Use the params field to customize + // the container. 
The container is verified to work correctly on + // ubuntu 16.04 operating system. See more at + // [containers + // + // quickstart](https: + // //cloud.google.com/vision/automl/docs/containers-gcs-quickstart) // * core_ml - Used for iOS mobile devices. string model_format = 4; // Additional model-type and format specific parameters describing the // requirements for the to be exported model files, any string must be up to // 25000 characters long. + // + // * For `docker` format: + // `cpu_architecture` - (string) "x86_64" (default). + // `gpu_architecture` - (string) "none" (default), "nvidia". map params = 2; } diff --git a/google/cloud/automl_v1/proto/io_pb2.py b/google/cloud/automl_v1/proto/io_pb2.py index b0a5f000..8a784f96 100644 --- a/google/cloud/automl_v1/proto/io_pb2.py +++ b/google/cloud/automl_v1/proto/io_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1/proto/io.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,23 +12,20 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1/proto/io.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z`__ for more - information. - - CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH,LABEL,LABEL,... - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of - up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, - .BMP, .TIFF, .ICO. - - - ``LABEL`` - A label that identifies the object in the image. - - For the ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed per image. If an image has not yet been labeled, then it should - be mentioned just once with no ``LABEL``. - - Some sample rows: - - :: - - TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - - - - - - See `Preparing your training - data `__ - for more information. - - A CSV file(s) with each line in format: - - :: - - ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. 
- - - ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of - up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each - image is assumed to be exhaustively labeled. - - - ``LABEL`` - A label that identifies the object in the image specified - by the ``BOUNDING_BOX``. - - - ``BOUNDING BOX`` - The vertices of an object in the example image. - The minimum allowed ``BOUNDING_BOX`` edge length is 0.01, and no more - than 500 ``BOUNDING_BOX`` instances per image are allowed (one - ``BOUNDING_BOX`` per line). If an image has no looked for objects - then it should be mentioned just once with no LABEL and the ",,,,,,," - in place of the ``BOUNDING_BOX``. - - **Four sample rows:** - - :: - - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - - - - - - - - - - See `Preparing your training - data `__ for more - information. - - One or more CSV file(s) with each line in the following format: - - :: - - ML_USE,GCS_FILE_PATH - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing.. - - - ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored in - Google Cloud Storage that contains in-line text in-line as documents - for model training. - - After the training data set has been determined from the ``TRAIN`` and - ``UNASSIGNED`` CSV files, the training data is divided into train and - validation data sets. 70% for training and 30% for validation. - - For example: - - :: - - TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl - TEST,gs://folder/file3.jsonl - - **In-line JSONL files** - - In-line .JSONL files contain, per line, a JSON document that wraps a - [``text_snippet``][google.cloud.automl.v1.TextSnippet] field followed by - one or more [``annotations``][google.cloud.automl.v1.AnnotationPayload] - fields, which have ``display_name`` and ``text_extraction`` fields to - describe the entity from the text snippet. Multiple JSON documents can - be separated using line breaks (``\\n``). - - The supplied text must be annotated exhaustively. For example, if you - include the text "horse", but do not label it as "animal", then "horse" - is assumed to not be an "animal". - - Any given text snippet content must have 30,000 characters or less, and - also be UTF-8 NFC encoded. ASCII is accepted as it is UTF-8 NFC encoded. - - For example: - - :: - - { - "text_snippet": { - "content": "dog car cat" - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 2} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 6} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 10} - } - } - ] - }\\n - { - "text_snippet": { - "content": "This dog is good." 
- }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 5, "end_offset": 7} - } - } - ] - } - - **JSONL files that reference documents** - - .JSONL files contain, per line, a JSON document that wraps a - ``input_config`` that contains the path to a source PDF document. - Multiple JSON documents can be separated using line breaks - (``\\n``). - - For example: - - :: - - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] - } - } - } - } - - **In-line JSONL files with PDF layout information** - - **Note:** You can only annotate PDF files using the UI. The format - described below applies to annotated PDF files exported using the UI or - ``exportData``. - - In-line .JSONL files for PDF documents contain, per line, a JSON - document that wraps a ``document`` field that provides the textual - content of the PDF document and the layout information. - - For example: - - :: - - { - "document": { - "document_text": { - "content": "dog car cat" - } - "layout": [ - { - "text_segment": { - "start_offset": 0, - "end_offset": 11, - }, - "page_number": 1, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ], - }, - "text_segment_type": TOKEN, - } - ], - "document_dimensions": { - "width": 8.27, - "height": 11.69, - "unit": INCH, - } - "page_count": 3, - }, - "annotations": [ - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 0, "end_offset": 3} - } - }, - { - "display_name": "vehicle", - "text_extraction": { - "text_segment": {"start_offset": 4, "end_offset": 7} - } - }, - { - "display_name": "animal", - "text_extraction": { - "text_segment": {"start_offset": 8, "end_offset": 11} - } - }, - ], - - - - - - See `Preparing your training - data `__ - for more information. - - One or more CSV file(s) with each line in the following format: - - :: - - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage file - path, that is, prefixed by "gs://", it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes (""), it is treated as a ``TEXT_SNIPPET``. For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, "gs://folder/content.txt" - AutoML imports the file content as a text snippet. For - ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. - In both cases, size of the content must be 10MB or less in size. For - zip files, the size of each file inside the zip must be 10MB or less - in size. - - For the ``MULTICLASS`` classification type, at most one ``LABEL`` is - allowed. The ``ML_USE`` and ``LABEL`` columns are optional. 
Supported - file extensions: .TXT, .PDF, .ZIP - - A maximum of 100 unique labels are allowed per CSV row. - - Sample rows: - - :: - - TRAIN,"They have bad food and very rude",RudeService,BadFood - gs://folder/content.txt,SlowService - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,BadFood - - - - - - See `Preparing your training - data `__ - for more information. - - CSV file(s) with each line in format: - - :: - - ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),SENTIMENT - - - ``ML_USE`` - Identifies the data set that the current row (file) - applies to. This value can be one of the following: - - - ``TRAIN`` - Rows in this file are used to train the model. - - ``TEST`` - Rows in this file are used to test the model during - training. - - ``UNASSIGNED`` - Rows in this file are not categorized. They are - Automatically divided into train and test data. 80% for training - and 20% for testing. - - - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a - pattern. If the column content is a valid Google Cloud Storage file - path, that is, prefixed by "gs://", it is treated as a - ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double - quotes (""), it is treated as a ``TEXT_SNIPPET``. For - ``GCS_FILE_PATH``, the path must lead to a file with supported - extension and UTF-8 encoding, for example, "gs://folder/content.txt" - AutoML imports the file content as a text snippet. For - ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. - In both cases, size of the content must be 128kB or less in size. For - zip files, the size of each file inside the zip must be 128kB or less - in size. - - The ``ML_USE`` and ``SENTIMENT`` columns are optional. Supported file - extensions: .TXT, .PDF, .ZIP - - - ``SENTIMENT`` - An integer between 0 and - Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max - (inclusive). Describes the ordinal of the sentiment - higher value - means a more positive sentiment. All the values are completely - relative, i.e. neither 0 needs to mean a negative or neutral - sentiment nor sentiment\_max needs to mean a positive one - it is - just required that 0 is the least positive sentiment in the data, and - sentiment\_max is the most positive one. The SENTIMENT shouldn't be - confused with "score" or "magnitude" from the previous Natural - Language Sentiment Analysis API. All SENTIMENT values between 0 and - sentiment\_max must be represented in the imported data. On - prediction the same 0 to sentiment\_max range will be used. The - difference between neighboring sentiment values needs not to be - uniform, e.g. 1 and 2 may be similar whereas the difference between 2 - and 3 may be large. - - Sample rows: - - :: - - TRAIN,"@freewrytin this is way too good for your product",2 - gs://folder/content.txt,3 - TEST,gs://folder/document.pdf - VALIDATE,gs://folder/text_files.zip,2 - - - - - - **Input field definitions:** - - ``ML_USE`` - ("TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED") Describes how the - given example (file) should be used for model training. "UNASSIGNED" - can be used when user has no preference. - ``GCS_FILE_PATH`` - The path to a file on Google Cloud Storage. For example, - "gs://folder/image1.png". - ``LABEL`` - A display name of an object on an image, video etc., e.g. "dog". - Must be up to 32 characters long and can consist only of ASCII Latin - letters A-Z and a-z, underscores(\_), and ASCII digits 0-9. 
For each - label an AnnotationSpec is created which display\_name becomes the - label; AnnotationSpecs are given back in predictions. - ``BOUNDING_BOX`` - (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A - rectangle parallel to the frame of the example (image, video). If 4 - vertices are given they are connected by edges in the order - provided, if 2 are given they are recognized as diagonally opposite - vertices of the rectangle. - ``VERTEX`` - (``COORDINATE,COORDINATE``) First coordinate is horizontal (x), the - second is vertical (y). - ``COORDINATE`` - A float in 0 to 1 range, relative to total length of image or video - in given dimension. For fractions the leading non-decimal 0 can be - omitted (i.e. 0.3 = .3). Point 0,0 is in top left. - ``TEXT_SNIPPET`` - The content of a text snippet, UTF-8 encoded, enclosed within double - quotes (""). - ``DOCUMENT`` - A field that provides the textual content with document and the - layout information. - - **Errors:** - - If any of the provided CSV files can't be parsed or if more than certain - percent of CSV rows cannot be processed then the operation fails and - nothing is imported. Regardless of overall success or failure the - per-row failures, up to a certain count cap, is listed in - Operation.metadata.partial\_failures. - + “example” file (that is, image, video etc.) with identical content + (even if it had different ``GCS_FILE_PATH``) is mentioned multiple + times, then its label, bounding boxes etc. are appended. The same file + should be always provided with the same ``ML_USE`` and + ``GCS_FILE_PATH``, if it is not, then these values are + nondeterministically selected from the given ones. The formats are + represented in EBNF with commas being literal and with non-terminal + symbols defined near the end of this comment. The formats are: .. + raw:: html

+  AutoML Vision
+
+  Classification:
See + `Preparing your training data + `__ for more + information. CSV file(s) with each line in format: :: + ML_USE,GCS_FILE_PATH,LABEL,LABEL,... - ``ML_USE`` - Identifies the + data set that the current row (file) applies to. This value can be + one of the following: - ``TRAIN`` - Rows in this file are used to + train the model. - ``TEST`` - Rows in this file are used to test + the model during training. - ``UNASSIGNED`` - Rows in this + file are not categorized. They are Automatically divided into + train and test data. 80% for training and 20% for testing. - + ``GCS_FILE_PATH`` - The Google Cloud Storage location of an image of + up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, + .BMP, .TIFF, .ICO. - ``LABEL`` - A label that identifies the object + in the image. For the ``MULTICLASS`` classification type, at most one + ``LABEL`` is allowed per image. If an image has not yet been labeled, + then it should be mentioned just once with no ``LABEL``. Some sample + rows: :: TRAIN,gs://folder/image1.jpg,daisy + TEST,gs://folder/image2.jpg,dandelion,tulip,rose + UNASSIGNED,gs://folder/image3.jpg,daisy + UNASSIGNED,gs://folder/image4.jpg .. raw:: html
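+  A minimal sketch of importing such a CSV (assuming the ``AutoMlClient``
+  shipped in this package; the project, location, dataset ID, and bucket
+  path are placeholders)::
+
+      from google.cloud import automl_v1
+
+      client = automl_v1.AutoMlClient()
+      dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")
+      # The CSV follows the ML_USE,GCS_FILE_PATH,LABEL,... format described above.
+      operation = client.import_data(
+          dataset_name,
+          {"gcs_source": {"input_uris": ["gs://folder/image_classification.csv"]}},
+      )
+      operation.result()  # import_data is long-running; wait for completion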
+  Object Detection:
See `Preparing your training data + `__ for more information. A CSV file(s) with + each line in format: :: + ML_USE,GCS_FILE_PATH,[LABEL],(BOUNDING_BOX | ,,,,,,,) - ``ML_USE`` - + Identifies the data set that the current row (file) applies to. + This value can be one of the following: - ``TRAIN`` - Rows in + this file are used to train the model. - ``TEST`` - Rows in this + file are used to test the model during training. - + ``UNASSIGNED`` - Rows in this file are not categorized. They are + Automatically divided into train and test data. 80% for training + and 20% for testing. - ``GCS_FILE_PATH`` - The Google Cloud Storage + location of an image of up to 30MB in size. Supported extensions: + .JPEG, .GIF, .PNG. Each image is assumed to be exhaustively + labeled. - ``LABEL`` - A label that identifies the object in the + image specified by the ``BOUNDING_BOX``. - ``BOUNDING BOX`` - The + vertices of an object in the example image. The minimum allowed + ``BOUNDING_BOX`` edge length is 0.01, and no more than 500 + ``BOUNDING_BOX`` instances per image are allowed (one + ``BOUNDING_BOX`` per line). If an image has no looked for objects + then it should be mentioned just once with no LABEL and the “,,,,,,,” + in place of the ``BOUNDING_BOX``. **Four sample rows:** :: + TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, + TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, + UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 + TEST,gs://folder/im3.png,,,,,,,,, .. raw:: html
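+  A small sketch (standard library only; the file path and label are
+  placeholders) of composing one such row from two diagonally opposite,
+  normalized vertices::
+
+      x_min, y_min, x_max, y_max = 0.1, 0.1, 0.3, 0.3
+      row = "TRAIN,gs://folder/image1.png,car,%s,%s,,,%s,%s,," % (x_min, y_min, x_max, y_max)
+      # row == "TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,"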
+  AutoML Video Intelligence
+
+  Classification:
See `Preparing your + training data `__ for more information. CSV + file(s) with each line in format: :: ML_USE,GCS_FILE_PATH For + ``ML_USE``, do not use ``VALIDATE``. ``GCS_FILE_PATH`` is the path to + another .csv file that describes training example for a given + ``ML_USE``, using the following row format: :: + GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) Here + ``GCS_FILE_PATH`` leads to a video of up to 50GB in size and up to 3h + duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. + ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be within the + length of the video, and the end time must be after the start time. + Any segment of a video which has one or more labels on it, is + considered a hard negative for all other labels. Any segment with no + labels on it is considered to be unknown. If a whole video is unknown, + then it should be mentioned just once with “,,” in place of ``LABEL, + TIME_SEGMENT_START,TIME_SEGMENT_END``. Sample top level CSV file: :: + TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for + a particular ML_USE: :: gs://folder/video1.avi,car,120,180.000021 + gs://folder/video1.avi,bike,150,180.000021 + gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, .. raw:: + html
+  Object Tracking:
See `Preparing your + training data `__ for more information. CSV file(s) with each + line in format: :: ML_USE,GCS_FILE_PATH For ``ML_USE``, do not + use ``VALIDATE``. ``GCS_FILE_PATH`` is the path to another .csv file + that describes training example for a given ``ML_USE``, using the + following row format: :: + GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or :: + GCS_FILE_PATH,,,,,,,,,, Here ``GCS_FILE_PATH`` leads to a video of up + to 50GB in size and up to 3h duration. Supported extensions: .MOV, + .MPEG4, .MP4, .AVI. Providing ``INSTANCE_ID``\ s can help to obtain a + better model. When a specific labeled entity leaves the video frame, + and shows up afterwards it is not required, albeit preferable, that + the same ``INSTANCE_ID`` is given to it. ``TIMESTAMP`` must be within + the length of the video, the ``BOUNDING_BOX`` is assumed to be drawn + on the closest video’s frame to the ``TIMESTAMP``. Any mentioned by + the ``TIMESTAMP`` frame is expected to be exhaustively labeled and no + more than 500 ``BOUNDING_BOX``-es per frame are allowed. If a whole + video is unknown, then it should be mentioned just once with + “,,,,,,,,,,” in place of ``LABEL, + [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX``. Sample top level CSV file: + :: TRAIN,gs://folder/train_videos.csv + TEST,gs://folder/test_videos.csv + UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV + file for a particular ML_USE: :: + gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 + gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 + gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 + gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, + gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, + gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, + gs://folder/video2.avi,,,,,,,,,,, .. raw:: html
+  AutoML Natural Language
+
+  Entity Extraction:
See `Preparing your training data + `__ for more + information. One or more CSV file(s) with each line in the following + format: :: ML_USE,GCS_FILE_PATH - ``ML_USE`` - Identifies the + data set that the current row (file) applies to. This value can be + one of the following: - ``TRAIN`` - Rows in this file are used to + train the model. - ``TEST`` - Rows in this file are used to test + the model during training. - ``UNASSIGNED`` - Rows in this + file are not categorized. They are Automatically divided into + train and test data. 80% for training and 20% for testing.. - + ``GCS_FILE_PATH`` - a Identifies JSON Lines (.JSONL) file stored in + Google Cloud Storage that contains in-line text in-line as documents + for model training. After the training data set has been determined + from the ``TRAIN`` and ``UNASSIGNED`` CSV files, the training data is + divided into train and validation data sets. 70% for training and 30% + for validation. For example: :: TRAIN,gs://folder/file1.jsonl + VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl + **In-line JSONL files** In-line .JSONL files contain, per line, a + JSON document that wraps a + [``text_snippet``][google.cloud.automl.v1.TextSnippet] field followed + by one or more + [``annotations``][google.cloud.automl.v1.AnnotationPayload] fields, + which have ``display_name`` and ``text_extraction`` fields to describe + the entity from the text snippet. Multiple JSON documents can be + separated using line breaks (``\\n``). The supplied text must + be annotated exhaustively. For example, if you include the text + “horse”, but do not label it as “animal”, then “horse” is assumed to + not be an “animal”. Any given text snippet content must have 30,000 + characters or less, and also be UTF-8 NFC encoded. ASCII is accepted + as it is UTF-8 NFC encoded. For example: :: { + "text_snippet": { "content": "dog car cat" }, + "annotations": [ { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": 0, + "end_offset": 2} } }, { + "display_name": "vehicle", "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 6} } + }, { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": 8, + "end_offset": 10} } } ] }\\n { + "text_snippet": { "content": "This dog is good." }, + "annotations": [ { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": 5, + "end_offset": 7} } } ] } **JSONL files + that reference documents** .JSONL files contain, per line, a JSON + document that wraps a ``input_config`` that contains the path to a + source document. Multiple JSON documents can be separated using line + breaks (``\\n``). Supported document extensions: .PDF, .TIF, + .TIFF For example: :: { "document": { + "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document1.pdf" ] } } } }\\n { + "document": { "input_config": { "gcs_source": { + "input_uris": [ "gs://folder/document2.tif" ] } } + } } **In-line JSONL files with document layout information** + **Note:** You can only annotate documents using the UI. The format + described below applies to annotated documents exported using the UI + or ``exportData``. In-line .JSONL files for documents contain, per + line, a JSON document that wraps a ``document`` field that provides + the textual content of the document and the layout information. 
For + example: :: { "document": { "document_text": { + "content": "dog car cat" } "layout": [ + { "text_segment": { + "start_offset": 0, "end_offset": 11, + }, "page_number": 1, + "bounding_poly": { "normalized_vertices": [ + {"x": 0.1, "y": 0.1}, {"x": 0.1, "y": 0.3}, + {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, + ], }, "text_segment_type": + TOKEN, } ], + "document_dimensions": { "width": 8.27, + "height": 11.69, "unit": INCH, } + "page_count": 3, }, "annotations": [ + { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": + 0, "end_offset": 3} } }, { + "display_name": "vehicle", "text_extraction": { + "text_segment": {"start_offset": 4, "end_offset": 7} } + }, { "display_name": "animal", + "text_extraction": { "text_segment": {"start_offset": + 8, "end_offset": 11} } }, ], + .. raw:: html
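+  A minimal sketch (standard library only; the output file name is a
+  placeholder) of producing one line of such an in-line .JSONL file::
+
+      import json
+
+      record = {
+          "text_snippet": {"content": "dog car cat"},
+          "annotations": [
+              {
+                  "display_name": "animal",
+                  "text_extraction": {"text_segment": {"start_offset": 0, "end_offset": 2}},
+              }
+          ],
+      }
+      with open("entity_extraction.jsonl", "a") as out:
+          out.write(json.dumps(record) + "\n")  # one JSON document per line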
+  Classification:
See `Preparing + your training data `__ for more information. One or more + CSV file(s) with each line in the following format: :: + ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... - ``ML_USE`` - + Identifies the data set that the current row (file) applies to. + This value can be one of the following: - ``TRAIN`` - Rows in + this file are used to train the model. - ``TEST`` - Rows in this + file are used to test the model during training. - + ``UNASSIGNED`` - Rows in this file are not categorized. They are + Automatically divided into train and test data. 80% for training + and 20% for testing. - ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are + distinguished by a pattern. If the column content is a valid Google + Cloud Storage file path, that is, prefixed by “gs://”, it is + treated as a ``GCS_FILE_PATH``. Otherwise, if the content is + enclosed in double quotes ("“), it is treated as a + ``TEXT_SNIPPET``. For ``GCS_FILE_PATH``, the path must lead to a + file with supported extension and UTF-8 encoding, for + example,”gs://folder/content.txt" AutoML imports the file content + as a text snippet. For ``TEXT_SNIPPET``, AutoML imports the column + content excluding quotes. In both cases, size of the content must + be 10MB or less in size. For zip files, the size of each file + inside the zip must be 10MB or less in size. For the + ``MULTICLASS`` classification type, at most one ``LABEL`` is + allowed. The ``ML_USE`` and ``LABEL`` columns are optional. + Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP A maximum + of 100 unique labels are allowed per CSV row. Sample rows: :: + TRAIN,"They have bad food and very rude",RudeService,BadFood + gs://folder/content.txt,SlowService TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,BadFood .. raw:: html +
+  Sentiment Analysis:
See `Preparing your + training data `__ for more information. CSV file(s) + with each line in format: :: ML_USE,(TEXT_SNIPPET | + GCS_FILE_PATH),SENTIMENT - ``ML_USE`` - Identifies the data set that + the current row (file) applies to. This value can be one of the + following: - ``TRAIN`` - Rows in this file are used to train the + model. - ``TEST`` - Rows in this file are used to test the model + during training. - ``UNASSIGNED`` - Rows in this file are + not categorized. They are Automatically divided into train and + test data. 80% for training and 20% for testing. - + ``TEXT_SNIPPET`` and ``GCS_FILE_PATH`` are distinguished by a + pattern. If the column content is a valid Google Cloud Storage file + path, that is, prefixed by “gs://”, it is treated as a + ``GCS_FILE_PATH``. Otherwise, if the content is enclosed in double + quotes ("“), it is treated as a ``TEXT_SNIPPET``. For + ``GCS_FILE_PATH``, the path must lead to a file with supported + extension and UTF-8 encoding, for example,”gs://folder/content.txt" + AutoML imports the file content as a text snippet. For + ``TEXT_SNIPPET``, AutoML imports the column content excluding quotes. + In both cases, size of the content must be 128kB or less in size. For + zip files, the size of each file inside the zip must be 128kB or less + in size. The ``ML_USE`` and ``SENTIMENT`` columns are optional. + Supported file extensions: .TXT, .PDF, .TIF, .TIFF, .ZIP - + ``SENTIMENT`` - An integer between 0 and + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). + Describes the ordinal of the sentiment - higher value means a more + positive sentiment. All the values are completely relative, + i.e. neither 0 needs to mean a negative or neutral sentiment nor + sentiment_max needs to mean a positive one - it is just required that + 0 is the least positive sentiment in the data, and sentiment_max is + the most positive one. The SENTIMENT shouldn’t be confused with + “score” or “magnitude” from the previous Natural Language Sentiment + Analysis API. All SENTIMENT values between 0 and sentiment_max must + be represented in the imported data. On prediction the same 0 to + sentiment_max range will be used. The difference between neighboring + sentiment values needs not to be uniform, e.g. 1 and 2 may be similar + whereas the difference between 2 and 3 may be large. Sample rows: :: + TRAIN,"@freewrytin this is way too good for your product",2 + gs://folder/content.txt,3 TEST,gs://folder/document.pdf + VALIDATE,gs://folder/text_files.zip,2 .. raw:: html
+  AutoML Tables
See `Preparing + your training data `__ for more information. You can use either + [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] or + [bigquery_source][google.cloud.automl.v1.InputConfig.bigquery_source]. + All input is concatenated into a single [primary_table_spec_id][googl + e.cloud.automl.v1.TablesDatasetMetadata.primary_table_spec_id] **For + gcs_source:** CSV file(s), where the first row of the first file is + the header, containing unique column names. If the first row of a + subsequent file is the same as the header, then it is also treated as + a header. All other rows contain values for the corresponding columns. + Each .CSV file by itself must be 10GB or smaller, and their total size + must be 100GB or smaller. First three sample rows of a CSV file: .. + raw:: html
    "Id","First Name","Last
+  Name","Dob","Addresses"     "1","John","Doe","1968-01-22","[{"status":
+  "current","address":"123_First_Avenue","city":"Seattle","state":"WA","
+  zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456\_
+  Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYea
+  rs":"5"}]"     "2","Jane","Doe","1980-10-16","[{"status":"current","ad
+  dress":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","nu
+  mberOfYears":"2"},{"status":"previous","address":"321_Main_Street","ci
+  ty":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
+  
**For bigquery_source:** An URI of a BigQuery table. The user + data size of the BigQuery table must be 100GB or smaller. An imported + table must have between 2 and 1,000 columns, inclusive, and between + 1000 and 100,000,000 rows, inclusive. There are at most 5 import data + running in parallel. .. raw:: html
**Input field definitions:** ``ML_USE`` (“TRAIN” \| + “VALIDATE” \| “TEST” \| “UNASSIGNED”) Describes how the given + example (file) should be used for model training. “UNASSIGNED” can + be used when user has no preference. ``GCS_FILE_PATH`` The path to + a file on Google Cloud Storage. For example, + “gs://folder/image1.png”. ``LABEL`` A display name of an object on + an image, video etc., e.g. “dog”. Must be up to 32 characters long + and can consist only of ASCII Latin letters A-Z and a-z, + underscores(_), and ASCII digits 0-9. For each label an + AnnotationSpec is created which display_name becomes the label; + AnnotationSpecs are given back in predictions. ``INSTANCE_ID`` A + positive integer that identifies a specific instance of a labeled + entity on an example. Used e.g. to track two cars on a video while + being able to tell apart which one is which. ``BOUNDING_BOX`` + (``VERTEX,VERTEX,VERTEX,VERTEX`` \| ``VERTEX,,,VERTEX,,``) A + rectangle parallel to the frame of the example (image, video). If 4 + vertices are given they are connected by edges in the order provided, + if 2 are given they are recognized as diagonally opposite vertices of + the rectangle. ``VERTEX`` (``COORDINATE,COORDINATE``) First + coordinate is horizontal (x), the second is vertical (y). + ``COORDINATE`` A float in 0 to 1 range, relative to total length of + image or video in given dimension. For fractions the leading non- + decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top left. + ``TIME_SEGMENT_START`` (``TIME_OFFSET``) Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). ``TIME_SEGMENT_END`` (``TIME_OFFSET``) + Expresses an end, exclusive, of a time segment within n example + that has a time dimension (e.g. video). ``TIME_OFFSET`` A number of + seconds as measured from the start of an example (e.g. video). + Fractions are allowed, up to a microsecond precision. “inf” is + allowed, and it means the end of the example. ``TEXT_SNIPPET`` The + content of a text snippet, UTF-8 encoded, enclosed within double + quotes (""). ``DOCUMENT`` A field that provides the textual content + with document and the layout information. **Errors:** If any of + the provided CSV files can’t be parsed or if more than certain percent + of CSV rows cannot be processed then the operation fails and nothing + is imported. Regardless of overall success or failure the per-row + failures, up to a certain count cap, is listed in + Operation.metadata.partial_failures. Attributes: source: @@ -1105,10 +1006,14 @@ params: Additional domain-specific parameters describing the semantic of the imported data, any string must be up to 25000 - characters long. + characters long. .. raw:: html

+            AutoML Tables:

``schema_inference_version`` + (integer) This value must be supplied. The version of the + algorithm to use for the initial inference of the column + data types of the imported table. Allowed values: “1”. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.InputConfig) - ), + }, ) _sym_db.RegisterMessage(InputConfig) _sym_db.RegisterMessage(InputConfig.ParamsEntry) @@ -1116,155 +1021,152 @@ BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTINPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Input configuration for BatchPredict Action. - - The format of input depends on the ML problem of the model used for - prediction. As input source the - [gcs\_source][google.cloud.automl.v1.InputConfig.gcs\_source] is - expected, unless specified otherwise. - - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - - ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF Text files can be no larger than - 10MB in size. - - Sample rows: - - :: - - gs://folder/text1.txt - gs://folder/text2.pdf - - - - - - One or more CSV files where each line is a single column: - - :: - - GCS_FILE_PATH - + { + "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Input configuration for BatchPredict Action. The format of input + depends on the ML problem of the model used for prediction. As input + source the [gcs_source][google.cloud.automl.v1.InputConfig.gcs_source] + is expected, unless specified otherwise. The formats are represented + in EBNF with commas being literal and with non-terminal symbols + defined near the end of this comment. The formats are: .. raw:: html +

+  AutoML Vision
+
+  Classification:
One or more CSV files + where each line is a single column: :: GCS_FILE_PATH The Google + Cloud Storage location of an image of up to 30MB in size. Supported + extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the + batch predict output. Sample rows: :: gs://folder/image1.jpeg + gs://folder/image2.gif gs://folder/image3.png .. raw:: html +
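+  A minimal sketch of running such a batch prediction (assuming the
+  ``PredictionServiceClient`` shipped in this package; the project,
+  location, model ID, and bucket paths are placeholders)::
+
+      from google.cloud import automl_v1
+
+      prediction_client = automl_v1.PredictionServiceClient()
+      model_name = prediction_client.model_path("my-project", "us-central1", "my-model-id")
+      operation = prediction_client.batch_predict(
+          model_name,
+          {"gcs_source": {"input_uris": ["gs://folder/batch_predict.csv"]}},
+          {"gcs_destination": {"output_uri_prefix": "gs://folder/output/"}},
+          params={},  # e.g. {"score_threshold": "0.5"} for classification models
+      )
+      operation.result()  # results are written as JSONL under the output prefix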
+  Object Detection:
One or more CSV files where + each line is a single column: :: GCS_FILE_PATH The Google Cloud + Storage location of an image of up to 30MB in size. Supported + extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the + batch predict output. Sample rows: :: gs://folder/image1.jpeg + gs://folder/image2.gif gs://folder/image3.png .. raw:: html +
+  AutoML Video Intelligence
+
+  Classification:
One or more CSV files + where each line is a single column: :: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END ``GCS_FILE_PATH`` + is the Google Cloud Storage location of video up to 50GB in size and + up to 3h in duration duration. Supported extensions: .MOV, .MPEG4, + .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be + within the length of the video, and the end time must be after the + start time. Sample rows: :: gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf .. raw:: + html
+  Object Tracking:
One or more CSV files + where each line is a single column: :: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END ``GCS_FILE_PATH`` + is the Google Cloud Storage location of video up to 50GB in size and + up to 3h in duration duration. Supported extensions: .MOV, .MPEG4, + .MP4, .AVI. ``TIME_SEGMENT_START`` and ``TIME_SEGMENT_END`` must be + within the length of the video, and the end time must be after the + start time. Sample rows: :: gs://folder/video1.mp4,10,40 + gs://folder/video1.mp4,20,60 gs://folder/vid2.mov,0,inf .. raw:: + html
+  AutoML Natural Language
+
+  Classification:
One or more + CSV files where each line is a single column: :: GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location of a text file. - Supported file extensions: .TXT, .PDF Text files can be no larger than - 128kB in size. - - Sample rows: - - :: - - gs://folder/text1.txt - gs://folder/text2.pdf - - - - - - One or more JSONL (JSON Lines) files that either provide inline text or - documents. You can only use one format, either inline text or documents, - for a single call to [AutoMl.BatchPredict]. - - Each JSONL file contains a per line a proto that wraps a temporary - user-assigned TextSnippet ID (string up to 2000 characters long) called - "id", a TextSnippet proto (in JSON representation) and zero or more + Supported file extensions: .TXT, .PDF, .TIF, .TIFF Text files can be + no larger than 10MB in size. Sample rows: :: + gs://folder/text1.txt gs://folder/text2.pdf + gs://folder/text3.tif .. raw:: html
.. raw:: html +
.. raw:: html
Sentiment Analysis .. raw:: html +
One or more CSV files where each line is a single column: :: + GCS_FILE_PATH ``GCS_FILE_PATH`` is the Google Cloud Storage location + of a text file. Supported file extensions: .TXT, .PDF, .TIF, .TIFF + Text files can be no larger than 128kB in size. Sample rows: :: + gs://folder/text1.txt gs://folder/text2.pdf + gs://folder/text3.tif .. raw:: html
.. raw:: html +
.. raw:: html
Entity Extraction .. raw:: html +
One or more JSONL (JSON Lines) files that either provide inline + text or documents. You can only use one format, either inline text or + documents, for a single call to [AutoMl.BatchPredict]. Each JSONL + file contains a per line a proto that wraps a temporary user-assigned + TextSnippet ID (string up to 2000 characters long) called “id”, a + TextSnippet proto (in JSON representation) and zero or more TextFeature protos. Any given text snippet content must have 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII already is). - The IDs provided should be unique. - - Each document JSONL file contains, per line, a proto that wraps a - Document proto with ``input_config`` set. Only PDF documents are - currently supported, and each PDF document cannot exceed 2MB in size. - - Each JSONL file must not exceed 100MB in size, and no more than 20 JSONL - files may be passed. - - Sample inline JSONL file (Shown with artificial line breaks. Actual line - breaks are denoted by "``\\n``".): - - :: - - { - "id": "my_first_id", - "text_snippet": { "content": "dog car cat"}, - "text_features": [ - { - "text_segment": {"start_offset": 4, "end_offset": 6}, - "structural_type": PARAGRAPH, - "bounding_poly": { - "normalized_vertices": [ - {"x": 0.1, "y": 0.1}, - {"x": 0.1, "y": 0.3}, - {"x": 0.3, "y": 0.3}, - {"x": 0.3, "y": 0.1}, - ] - }, - } - ], - }\\n - { - "id": "2", - "text_snippet": { - "content": "Extended sample content", - "mime_type": "text/plain" - } - } - - Sample document JSONL file (Shown with artificial line breaks. Actual - line breaks are denoted by "``\\n``".): - - :: - - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document1.pdf" ] - } - } - } - }\\n - { - "document": { - "input_config": { - "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] - } - } - } - } - - - - - - **Input field definitions:** - - ``GCS_FILE_PATH`` - The path to a file on Google Cloud Storage. For example, - "gs://folder/video.avi". - - **Errors:** - - If any of the provided CSV files can't be parsed or if more than certain - percent of CSV rows cannot be processed then the operation fails and - prediction does not happen. Regardless of overall success or failure the - per-row failures, up to a certain count cap, will be listed in - Operation.metadata.partial\_failures. - + The IDs provided should be unique. Each document JSONL file contains, + per line, a proto that wraps a Document proto with ``input_config`` + set. Each document cannot exceed 2MB in size. Supported document + extensions: .PDF, .TIF, .TIFF Each JSONL file must not exceed 100MB + in size, and no more than 20 JSONL files may be passed. Sample inline + JSONL file (Shown with artificial line breaks. Actual line breaks are + denoted by “``\\n``”.): :: { "id": "my_first_id", + "text_snippet": { "content": "dog car cat"}, "text_features": [ + { "text_segment": {"start_offset": 4, "end_offset": 6}, + "structural_type": PARAGRAPH, "bounding_poly": { + "normalized_vertices": [ {"x": 0.1, "y": 0.1}, + {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, + {"x": 0.3, "y": 0.1}, ] }, } ], + }\\n { "id": "2", "text_snippet": { "content": + "Extended sample content", "mime_type": "text/plain" } + } Sample document JSONL file (Shown with artificial line breaks. 
+ Actual line breaks are denoted by “``\\n``”.): :: { + "document": { "input_config": { "gcs_source": { + "input_uris": [ "gs://folder/document1.pdf" ] } } + } }\\n { "document": { "input_config": { + "gcs_source": { "input_uris": [ "gs://folder/document2.tif" ] + } } } } .. raw:: html
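Each line of the inline JSONL input described above is a standalone JSON object carrying an ``id`` and a ``text_snippet``. A minimal sketch of writing such a file (standard library only; the ids and contents simply echo the sample above and are purely illustrative)::

    import json

    snippets = [
        {"id": "my_first_id", "text_snippet": {"content": "dog car cat"}},
        {
            "id": "2",
            "text_snippet": {
                "content": "Extended sample content",
                "mime_type": "text/plain",
            },
        },
    ]
    with open("entity_extraction_input.jsonl", "w") as jsonl_file:
        for snippet in snippets:
            # One JSON-encoded proto per line, as the JSONL format requires.
            jsonl_file.write(json.dumps(snippet) + "\n")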
.. raw:: html +
.. raw:: html

AutoML Tables .. raw:: html

+ .. raw:: html
.. raw:: html +
See `Preparing your training data + `__ for + more information. You can use either [gcs_source][google.cloud.automl + .v1.BatchPredictInputConfig.gcs_source] or + [bigquery_source][BatchPredictInputConfig.bigquery_source]. **For + gcs_source:** CSV file(s), each by itself 10GB or smaller and total + size must be 100GB or smaller, where first file must have a header + containing column names. If the first row of a subsequent file is the + same as the header, then it is also treated as a header. All other + rows contain values for the corresponding columns. The column names + must contain the model’s [input_feature_column_specs’][google.cloud.a + utoml.v1.TablesModelMetadata.input_feature_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input + feature column specs must contain values compatible with the column + spec’s data types. Prediction on all the rows, i.e. the CSV lines, + will be attempted. Sample rows from a CSV file: .. raw:: html +
    "First Name","Last Name","Dob","Addresses"     "John","Doe","
+  1968-01-22","[{"status":"current","address":"123_First_Avenue","city":
+  "Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"p
+  revious","address":"456_Main_Street","city":"Portland","state":"OR","z
+  ip":"22222","numberOfYears":"5"}]"     "Jane","Doe","1980-10-16","[{"s
+  tatus":"current","address":"789_Any_Avenue","city":"Albany","state":"N
+  Y","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"
+  321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOf
+  Years":"3"}]}    
**For bigquery_source:** The URI of a + BigQuery table. The user data size of the BigQuery table must be 100GB + or smaller. The column names must contain the model’s [input_feature + _column_specs’][google.cloud.automl.v1.TablesModelMetadata.input_featu + re_column_specs] + [display_name-s][google.cloud.automl.v1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input + feature column specs must contain values compatible with the column + spec’s data types. Prediction on all the rows of the table will be + attempted. .. raw:: html
.. raw:: html
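Whichever of the formats above is used, the prepared CSV or JSONL files are handed to the service through this message's ``gcs_source`` field. A minimal sketch using the classes generated in this module (the URI is a placeholder; only ``BatchPredictInputConfig``, ``GcsSource`` and ``input_uris`` come from the definitions in this file)::

    from google.cloud.automl_v1.proto import io_pb2

    # Reference the input file(s) prepared for batch prediction.
    input_config = io_pb2.BatchPredictInputConfig(
        gcs_source=io_pb2.GcsSource(
            input_uris=["gs://folder/batch_predict_input.csv"]
        )
    )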
+ **Input field definitions:** ``GCS_FILE_PATH`` The path to a file + on Google Cloud Storage. For example, “gs://folder/video.avi”. + ``TIME_SEGMENT_START`` (``TIME_OFFSET``) Expresses a beginning, + inclusive, of a time segment within an example that has a time + dimension (e.g. video). ``TIME_SEGMENT_END`` (``TIME_OFFSET``) + Expresses an end, exclusive, of a time segment within n example + that has a time dimension (e.g. video). ``TIME_OFFSET`` A number of + seconds as measured from the start of an example (e.g. video). + Fractions are allowed, up to a microsecond precision. “inf” is + allowed, and it means the end of the example. **Errors:** If any of + the provided CSV files can’t be parsed or if more than certain percent + of CSV rows cannot be processed then the operation fails and + prediction does not happen. Regardless of overall success or failure + the per-row failures, up to a certain count cap, will be listed in + Operation.metadata.partial_failures. Attributes: source: @@ -1274,19 +1176,17 @@ content. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictInputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictInputConfig) DocumentInputConfig = _reflection.GeneratedProtocolMessageType( "DocumentInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTINPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Input configuration of a - [Document][google.cloud.automl.v1.Document]. - + { + "DESCRIPTOR": _DOCUMENTINPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Input configuration of a [Document][google.cloud.automl.v1.Document]. Attributes: gcs_source: @@ -1295,77 +1195,36 @@ Supported extensions: .PDF. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DocumentInputConfig) - ), + }, ) _sym_db.RegisterMessage(DocumentInputConfig) OutputConfig = _reflection.GeneratedProtocolMessageType( "OutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_OUTPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Output configuration for ExportData. - - As destination the - [gcs\_destination][google.cloud.automl.v1.OutputConfig.gcs\_destination] - must be set unless specified otherwise for a domain. If gcs\_destination - is set then in the given directory a new directory is created. Its name - will be "export\_data--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. Only ground truth annotations are exported (not - approved annotations are not exported). - - The outputs correspond to how the data was imported, and may be used as - input to import data. The output formats are represented as EBNF with - literal commas and same non-terminal symbols definitions are these in - import data's [InputConfig][google.cloud.automl.v1.InputConfig]: - - - For Image Classification: CSV file(s) ``image_classification_1.csv``, - ``image_classification_2.csv``,...,\ ``image_classification_N.csv``\ with - each line in format: ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... where - GCS\_FILE\_PATHs point at the original, source locations of the - imported images. For MULTICLASS classification type, there can be at - most one LABEL per example. - - - For Image Object Detection: CSV file(s) - ``image_object_detection_1.csv``, - ``image_object_detection_2.csv``,...,\ ``image_object_detection_N.csv`` - with each line in format: - ML\_USE,GCS\_FILE\_PATH,[LABEL],(BOUNDING\_BOX \| ,,,,,,,) where - GCS\_FILE\_PATHs point at the original, source locations of the - imported images. 
- - - For Text Classification: In the created directory CSV file(s) - ``text_classification_1.csv``, ``text_classification_2.csv``, - ...,\ ``text_classification_N.csv`` will be created where N depends - on the total number of examples exported. Each line in the CSV is of - the format: ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... where - GCS\_FILE\_PATHs point at the exported .txt files containing the text - content of the imported example. For MULTICLASS classification type, - there will be at most one LABEL per example. - - - For Text Sentiment: In the created directory CSV file(s) - ``text_sentiment_1.csv``, ``text_sentiment_2.csv``, - ...,\ ``text_sentiment_N.csv`` will be created where N depends on the - total number of examples exported. Each line in the CSV is of the - format: ML\_USE,GCS\_FILE\_PATH,SENTIMENT where GCS\_FILE\_PATHs - point at the exported .txt files containing the text content of the - imported example. - - - For Text Extraction: CSV file ``text_extraction.csv``, with each line - in format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .JSONL - (i.e. JSON Lines) file which contains, per line, a proto that wraps a - TextSnippet proto (in json representation) followed by - AnnotationPayload protos (called annotations). If initially documents - had been imported, the JSONL will point at the original, source - locations of the imported documents. - - - For Translation: CSV file ``translation.csv``, with each line in - format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .TSV file - which describes examples that have given ML\_USE, using the following - row format per line: TEXT\_SNIPPET (in source language) - \\tTEXT\_SNIPPET (in target language) - + { + "DESCRIPTOR": _OUTPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """\* For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which + describes examples that have given ML_USE, using the following row + format per line: TEXT_SNIPPET (in source language) :raw-latex:`\t + `TEXT_SNIPPET (in target language) - For Tables: Output depends on + whether the dataset was imported from Google Cloud Storage or + BigQuery. Google Cloud Storage case: [gcs_destination][google.cloud.a + utoml.v1p1beta.OutputConfig.gcs_destination] must be set. Exported are + CSV file(s) ``tables_1.csv``, ``tables_2.csv``,…,\ ``tables_N.csv`` + with each having as header line the table’s column names, and all + other lines contain values for the header columns. BigQuery case: [bi + gquery_destination][google.cloud.automl.v1p1beta.OutputConfig.bigquery + _destination] pointing to a BigQuery project must be set. In the given + project a new dataset will be created with name + ``export_data__`` where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be in + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In that dataset a + new table called ``primary_table`` will be created, and filled with + precisely the same data as this obtained on import. Attributes: destination: @@ -1373,114 +1232,237 @@ gcs_destination: Required. The Google Cloud Storage location where the output is to be written to. For Image Object Detection, Text - Extraction in the given directory a new directory will be - created with name: export\_data-- where timestamp is in YYYY- - MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be - written into that directory. 
+ Extraction, Video Classification and Tables, in the given + directory a new directory will be created with name: + export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ + ISO-8601 format. All export output will be written into that + directory. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.OutputConfig) - ), + }, ) _sym_db.RegisterMessage(OutputConfig) BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictOutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTOUTPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Output configuration for BatchPredict Action. - - As destination the - - [gcs\_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs\_destination] - must be set unless specified otherwise for a domain. If gcs\_destination - is set then in the given directory a new directory is created. Its name - will be "prediction--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. The contents of it depends on the ML problem the - predictions are made for. - - - For Text Classification: In the created directory files - ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text (or pdf) file in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text (or pdf) file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text (or pdf) file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text (or pdf) file followed by exactly one - - ```google.rpc.Status`` `__ + { + "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Output configuration for BatchPredict Action. As destination the [gc + s_destination][google.cloud.automl.v1.BatchPredictOutputConfig.gcs_des + tination] must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory is + created. Its name will be “prediction--”, where timestamp is in YYYY- + MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the + ML problem the predictions are made for. - For Image Classification: + In the created directory files ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted images and annotations. A single image + will be listed only once with all its annotations, and its + annotations will never be split across files. Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + image’s “ID” : “” followed by a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. 
If prediction for any image failed + (partially or completely), then an additional ``errors_1.jsonl``, + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N + depends on total number of failed predictions). These files will have + a JSON representation of a proto that wraps the same “ID” : “” but + here followed by exactly one ```google.rpc.Status`` `_\_ + containing only ``code`` and ``message``\ fields. - For Image Object + Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,…,\ + ``image_object_detection_N.jsonl`` will be created, where N may be + 1, and depends on the total number of the successfully predicted + images and annotations. Each .JSONL file will contain, per line, a + JSON representation of a proto that wraps image’s “ID” : “” + followed by a list of zero or more AnnotationPayload protos (called + annotations), which have image_object_detection detail populated. A + single image will be listed only once with all its annotations, and + its annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + the same “ID” : “” but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``\ fields. \* For Video Classification: In the created + directory a video_classification.csv file, and a .JSON file per each + video classification requested in the input (i.e. each line in given + CSV(s)), will be created. :: The format of + video_classification.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SE + GMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_classification.csv has precisely + the same number of lines as the prediction input had.) JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message + otherwise. If STATUS is not “OK” then the .JSON file for that line may + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for the video time segment + the file is assigned to in the video_classification.csv. All + AnnotationPayload protos will have video_classification field + set, and will be sorted by video_classification.type field + (note that the returned types are governed by + `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). - For Video Object + Tracking: In the created directory a video_object_tracking.csv file + will be created, and multiple files video_object_trackinng_1.json, + video_object_trackinng_2.json,…, video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number of + lines in given CSV(s)). :: The format of + video_object_tracking.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_S + EGMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_object_tracking.csv has precisely + the same number of lines as the prediction input had.) 
JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message + otherwise. If STATUS is not “OK” then the .JSON file for that line may + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for each frame of the video + time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. - For Text Classification: In the + created directory files ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + inputs and annotations found. :: Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + input text file (or document) in the text snippet (or document) + proto and a list of zero or more AnnotationPayload protos + (called annotations), which have classification detail + populated. A single text file (or document) will be listed only + once with all its annotations, and its annotations will never be + split across files. If prediction for any input file (or + document) failed (partially or completely), then additional + `errors_1.jsonl`, `errors_2.jsonl`,..., `errors_N.jsonl` files + will be created (N depends on total number of failed + predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Text Sentiment: In the created directory files + ``text_sentiment_1.jsonl``, ``text_sentiment_2.jsonl``,…,\ + ``text_sentiment_N.jsonl`` will be created, where N may be 1, and + depends on the total number of inputs and annotations found. :: + Each .JSONL file will contain, per line, a JSON representation of a + proto that wraps input text file (or document) in the text + snippet (or document) proto and a list of zero or more + AnnotationPayload protos (called annotations), which have + text_sentiment detail populated. A single text file (or + document) will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any input file (or document) failed (partially or completely), + then additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input file followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Text Extraction: In the created directory files + ``text_extraction_1.jsonl``, ``text_extraction_2.jsonl``,…,\ + ``text_extraction_N.jsonl`` will be created, where N may be 1, and + depends on the total number of inputs and annotations found. The + contents of these .JSONL file(s) depend on whether the input used + inline text, or documents. If input was inline, then each .JSONL + file will contain, per line, a JSON representation of a proto that + wraps given in request text snippet’s “id” (if specified), followed + by input text snippet, and a list of zero or more AnnotationPayload + protos (called annotations), which have text_extraction detail + populated. 
A single text snippet will be listed only once with all + its annotations, and its annotations will never be split across + files. If input used documents, then each .JSONL file will contain, + per line, a JSON representation of a proto that wraps given in + request document proto, followed by its OCR-ed representation in + the form of a text snippet, finally followed by a list of zero or + more AnnotationPayload protos (called annotations), which have + text_extraction detail populated and refer, via their indices, to + the OCR-ed text snippet. A single document (and its text snippet) + will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for any + text snippet failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + either the “id” : “” (in case of inline) or the document proto (in + case of document) but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Tables: Output depends on whether [gcs_destinati + on][google.cloud.automl.v1p1beta.BatchPredictOutputConfig.gcs_destinat + ion] or [bigquery_destination][google.cloud.automl.v1p1beta.BatchPred + ictOutputConfig.bigquery_destination] is set (either is allowed). + Google Cloud Storage case: In the created directory files + ``tables_1.csv``, ``tables_2.csv``,…, ``tables_N.csv`` will be + created, where N may be 1, and depends on the total number of the + successfully predicted rows. For all CLASSIFICATION [prediction_type- + s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type]: + Each .csv file will contain a header, listing all columns’ + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + given on input followed by M target column names in the format of "<[ + target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata. + target_column_spec] [display_name][google.cloud.automl.v1p1beta.Colum + nSpec.display_name]>\_\_score" where M is the number of distinct + target values, i.e. number of distinct values in the target column of + the table used to train the model. Subsequent lines will contain the + respective values of successfully predicted rows, with the last, + i.e. the target, columns having the corresponding prediction + [scores][google.cloud.automl.v1p1beta.TablesAnnotation.score]. For + REGRESSION and FORECASTING [prediction_type-s][google.cloud.automl.v1 + p1beta.TablesModelMetadata.prediction_type]: Each .csv file will + contain a header, listing all columns’ + [display_name-s][google.cloud.automl.v1p1beta.display_name] given on + input followed by the predicted target column with name in the format + of "predicted_<[target_column_specs][google.cloud.automl.v1p1beta.Tab + lesModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + Subsequent lines will contain the respective values of successfully + predicted rows, with the last, i.e. the target, column having the + predicted target value. If prediction for any rows failed, then an + additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will + be created (N depends on total number of failed rows). 
These files + will have analogous format as ``tables_*.csv``, but always with a + single target column having ```google.rpc.Status`` `_\_ + represented as a JSON string, and containing only ``code`` and + ``message``. BigQuery case: [bigquery_destination][google.cloud.autom + l.v1p1beta.OutputConfig.bigquery_destination] pointing to a BigQuery + project must be set. In the given project a new dataset will be + created with name ``prediction__`` where will be made BigQuery-dataset-name compatible + (e.g. most special characters will become underscores), and timestamp + will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the + dataset two tables will be created, ``predictions``, and ``errors``. + The ``predictions`` table’s column names will be the input columns’ + [display_name-s][google.cloud.automl.v1p1beta.ColumnSpec.display_name] + followed by the target column with name in the format of "predicted_< + [target_column_specs][google.cloud.automl.v1p1beta.TablesModelMetadata + .target_column_spec] + [display_name][google.cloud.automl.v1p1beta.ColumnSpec.display_name]>" + The input feature columns will contain the respective values of + successfully predicted rows, with the target column having an ARRAY of + [AnnotationPayloads][google.cloud.automl.v1p1beta.AnnotationPayload], + represented as STRUCT-s, containing + [TablesAnnotation][google.cloud.automl.v1p1beta.TablesAnnotation]. The + ``errors`` table contains rows for which the prediction has failed, it + has analogous input columns while the target column name is in the + format of "errors_<[target_column_specs][google.cloud.automl.v1p1beta + .TablesModelMetadata.target_column_spec] [display_name][google.cloud. + automl.v1p1beta.ColumnSpec.display_name]>", and as a value has + ```google.rpc.Status`` `__ represented as a STRUCT, and containing only ``code`` and ``message``. - - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text (or pdf) file in - the text snippet (or document) proto and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text (or pdf) file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text (or pdf) file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text (or pdf) file followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. The contents of these .JSONL file(s) depend on - whether the input used inline text, or documents. 
If input was - inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet's - "id" (if specified), followed by input text snippet, and a list of - zero or more AnnotationPayload protos (called annotations), which - have text\_extraction detail populated. A single text snippet will be - listed only once with all its annotations, and its annotations will - never be split across files. If input used documents, then each - .JSONL file will contain, per line, a JSON representation of a proto - that wraps given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by a - list of zero or more AnnotationPayload protos (called annotations), - which have text\_extraction detail populated and refer, via their - indices, to the OCR-ed text snippet. A single document (and its text - snippet) will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for any - text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - either the "id" : "" (in case of inline) or the document proto (in - case of document) but here followed by exactly one - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - Attributes: destination: The destination of the output. @@ -1489,27 +1471,26 @@ where the output is to be written to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.BatchPredictOutputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictOutputConfig) ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType( "ModelExportOutputConfig", (_message.Message,), - dict( - ParamsEntry=_reflection.GeneratedProtocolMessageType( + { + "ParamsEntry": _reflection.GeneratedProtocolMessageType( "ParamsEntry", (_message.Message,), - dict( - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, - __module__="google.cloud.automl_v1.proto.io_pb2" + { + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, + "__module__": "google.cloud.automl_v1.proto.io_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig.ParamsEntry) - ), + }, ), - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""Output configuration for ModelExport Action. - + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """Output configuration for ModelExport Action. Attributes: destination: @@ -1517,39 +1498,47 @@ gcs_destination: Required. The Google Cloud Storage location where the model is to be written to. This location may only be set for the - following model formats: "tflite", "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js", "core\_ml". Under the directory - given as the destination a new one with name "model-export--", + following model formats: “tflite”, “edgetpu_tflite”, + “tf_saved_model”, “tf_js”, “core_ml”. Under the directory + given as the destination a new one with name “model-export--”, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside the model and any of its supporting files will be written. model_format: The format in which the model must be exported. 
The available, and default, formats depend on the problem and model type (if - given problem and type combination doesn't have a format + given problem and type combination doesn’t have a format listed, it means its models are not exportable): - For Image Classification mobile-low-latency-1, mobile-versatile-1, - mobile-high-accuracy-1: "tflite" (default), "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js". - For Image Classification - mobile-core-ml-low-latency-1, mobile-core-ml-versatile-1, - mobile-core-ml-high-accuracy-1: "core\_ml" (default). - - For Image Object Detection mobile-low-latency-1, mobile- - versatile-1, mobile-high-accuracy-1: "tflite", - "tf\_saved\_model", "tf\_js". Formats description: - - tflite - Used for Android mobile devices. - edgetpu\_tflite - - Used for `Edge TPU `__ - devices. - tf\_saved\_model - A tensorflow model in - SavedModel format. - tf\_js - A `TensorFlow.js + mobile-high-accuracy-1: “tflite” (default), “edgetpu_tflite”, + “tf_saved_model”, “tf_js”, “docker”. - For Image + Classification mobile-core-ml-low-latency-1, mobile-core- + ml-versatile-1, mobile-core-ml-high-accuracy-1: “core_ml” + (default). - For Image Object Detection mobile-low- + latency-1, mobile-versatile-1, mobile-high-accuracy-1: + “tflite”, “tf_saved_model”, “tf_js”. Formats description: + - tflite - Used for Android mobile devices. - edgetpu_tflite + - Used for `Edge TPU `__ devices. - tf_saved_model - A tensorflow model in + SavedModel format. - tf_js - A `TensorFlow.js `__ model that can be used - in the browser and in Node.js using JavaScript.x\` - core\_ml - - Used for iOS mobile devices. + in the browser and in Node.js using JavaScript. - docker - + Used for Docker containers. Use the params field to + customize the container. The container is verified to work + correctly on ubuntu 16.04 operating system. See more at + [containers quickstart](https: + //cloud.google.com/vision/automl/docs/containers-gcs- + quickstart) \* core_ml - Used for iOS mobile devices. params: Additional model-type and format specific parameters describing the requirements for the to be exported model - files, any string must be up to 25000 characters long. + files, any string must be up to 25000 characters long. - For + ``docker`` format: ``cpu_architecture`` - (string) “x86_64” + (default). ``gpu_architecture`` - (string) “none” (default), + “nvidia”. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ModelExportOutputConfig) - ), + }, ) _sym_db.RegisterMessage(ModelExportOutputConfig) _sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry) @@ -1557,32 +1546,30 @@ GcsSource = _reflection.GeneratedProtocolMessageType( "GcsSource", (_message.Message,), - dict( - DESCRIPTOR=_GCSSOURCE, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""The Google Cloud Storage location for the input content. - + { + "DESCRIPTOR": _GCSSOURCE, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location for the input content. Attributes: input_uris: Required. Google Cloud Storage URIs to input files, up to 2000 - characters long. Accepted forms: \* Full object path, e.g. - gs://bucket/directory/object.csv + characters long. Accepted forms: \* Full object path, + e.g. 
gs://bucket/directory/object.csv """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GcsSource) - ), + }, ) _sym_db.RegisterMessage(GcsSource) GcsDestination = _reflection.GeneratedProtocolMessageType( "GcsDestination", (_message.Message,), - dict( - DESCRIPTOR=_GCSDESTINATION, - __module__="google.cloud.automl_v1.proto.io_pb2", - __doc__="""The Google Cloud Storage location where the output is to - be written to. - + { + "DESCRIPTOR": _GCSDESTINATION, + "__module__": "google.cloud.automl_v1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location where the output is to be written + to. Attributes: output_uri_prefix: @@ -1590,10 +1577,10 @@ 2000 characters long. Accepted forms: \* Prefix path: gs://bucket/directory The requesting user must have write permission to the bucket. The directory is created if it - doesn't exist. + doesn’t exist. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GcsDestination) - ), + }, ) _sym_db.RegisterMessage(GcsDestination) diff --git a/google/cloud/automl_v1/proto/model.proto b/google/cloud/automl_v1/proto/model.proto index ee080684..f5368937 100644 --- a/google/cloud/automl_v1/proto/model.proto +++ b/google/cloud/automl_v1/proto/model.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -35,7 +34,7 @@ option ruby_package = "Google::Cloud::AutoML::V1"; message Model { option (google.api.resource) = { type: "automl.googleapis.com/Model" - pattern: "projects/{project_id}/locations/{location_id}/models/{model_id}" + pattern: "projects/{project}/locations/{location}/models/{model}" }; // Deployment state of the model. diff --git a/google/cloud/automl_v1/proto/model_evaluation.proto b/google/cloud/automl_v1/proto/model_evaluation.proto index 8c768adc..601389f7 100644 --- a/google/cloud/automl_v1/proto/model_evaluation.proto +++ b/google/cloud/automl_v1/proto/model_evaluation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,13 +11,11 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1; -import "google/api/annotations.proto"; import "google/api/resource.proto"; import "google/cloud/automl/v1/classification.proto"; import "google/cloud/automl/v1/detection.proto"; @@ -25,6 +23,7 @@ import "google/cloud/automl/v1/text_extraction.proto"; import "google/cloud/automl/v1/text_sentiment.proto"; import "google/cloud/automl/v1/translation.proto"; import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; option csharp_namespace = "Google.Cloud.AutoML.V1"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl"; @@ -35,17 +34,24 @@ option ruby_package = "Google::Cloud::AutoML::V1"; // Evaluation results of a model. 
message ModelEvaluation { + option (google.api.resource) = { + type: "automl.googleapis.com/ModelEvaluation" + pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}" + }; + // Output only. Problem type specific evaluation metrics. oneof metrics { - // Model evaluation metrics for image, text classification. + // Model evaluation metrics for image, text, video and tables + // classification. + // Tables problem is considered a classification when the target column + // is CATEGORY DataType. ClassificationEvaluationMetrics classification_evaluation_metrics = 8; // Model evaluation metrics for translation. TranslationEvaluationMetrics translation_evaluation_metrics = 9; // Model evaluation metrics for image object detection. - ImageObjectDetectionEvaluationMetrics - image_object_detection_evaluation_metrics = 12; + ImageObjectDetectionEvaluationMetrics image_object_detection_evaluation_metrics = 12; // Evaluation metrics for text sentiment models. TextSentimentEvaluationMetrics text_sentiment_evaluation_metrics = 11; @@ -60,8 +66,15 @@ message ModelEvaluation { // `projects/{project_id}/locations/{location_id}/models/{model_id}/modelEvaluations/{model_evaluation_id}` string name = 1; - // Output only. The ID of the annotation spec that the model evaluation - // applies to. The The ID is empty for the overall model evaluation. + // Output only. The ID of the annotation spec that the model evaluation applies to. The + // The ID is empty for the overall model evaluation. + // For Tables annotation specs in the dataset do not exist and this ID is + // always not set, but for CLASSIFICATION + // + // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] + // the + // [display_name][google.cloud.automl.v1.ModelEvaluation.display_name] + // field is used. string annotation_spec_id = 2; // Output only. The value of @@ -69,7 +82,12 @@ message ModelEvaluation { // at the moment when the model was trained. Because this field returns a // value at model training time, for different models trained from the same // dataset, the values may differ, since display names could had been changed - // between the two model's trainings. + // between the two model's trainings. For Tables CLASSIFICATION + // + // [prediction_type-s][google.cloud.automl.v1.TablesModelMetadata.prediction_type] + // distinct values of the target column at the moment of the model evaluation + // are populated here. + // The display_name is empty for the overall model evaluation. string display_name = 15; // Output only. Timestamp when this model evaluation was created. diff --git a/google/cloud/automl_v1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1/proto/model_evaluation_pb2.py index 7ac909df..4975d076 100644 --- a/google/cloud/automl_v1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1/proto/model_evaluation_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/model_evaluation.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,7 +12,6 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_classification__pb2, @@ -33,20 +29,17 @@ translation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_translation__pb2, ) from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1/proto/model_evaluation.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z params = 3; } -// Response message for -// [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. +// Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. message PredictResponse { // Prediction result. - // Translation and Text Sentiment will return precisely one payload. + // AutoML Translation and AutoML Natural Language Sentiment Analysis + // return precisely one payload. repeated AnnotationPayload payload = 1; // The preprocessed example that AutoML actually makes prediction on. // Empty if AutoML does not preprocess the input example. - // * For Text Extraction: - // If the input is a .pdf file, the OCR'ed text will be provided in - // [document_text][google.cloud.automl.v1.Document.document_text]. - // - // * For Text Classification: - // If the input is a .pdf file, the OCR'ed trucated text will be provided in - // [document_text][google.cloud.automl.v1.Document.document_text]. // - // * For Text Sentiment: - // If the input is a .pdf file, the OCR'ed trucated text will be provided in - // [document_text][google.cloud.automl.v1.Document.document_text]. + // For AutoML Natural Language (Classification, Entity Extraction, and + // Sentiment Analysis), if the input is a document, the recognized text is + // returned in the + // [document_text][google.cloud.automl.v1.Document.document_text] + // property. ExamplePayload preprocessed_input = 3; // Additional domain-specific prediction response metadata. // - // * For Image Object Detection: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // image could have been returned. - // - // * For Text Sentiment: - // `sentiment_score` - (float, deprecated) A value between -1 and 1, - // -1 maps to least positive sentiment, while 1 maps to the most positive - // one and the higher the score, the more positive the sentiment in the - // document is. Yet these values are relative to the training data, so - // e.g. if all data was positive then -1 will be also positive (though - // the least). - // The sentiment_score shouldn't be confused with "score" or "magnitude" - // from the previous Natural Language Sentiment Analysis API. + // AutoML Vision Object Detection + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding boxes to return per image. 
+ // + // AutoML Natural Language Sentiment Analysis + // + // `sentiment_score` + // : (float, deprecated) A value between -1 and 1, + // -1 maps to least positive sentiment, while 1 maps to the most positive + // one and the higher the score, the more positive the sentiment in the + // document is. Yet these values are relative to the training data, so + // e.g. if all data was positive then -1 is also positive (though + // the least). + // `sentiment_score` is not the same as "score" and "magnitude" + // from Sentiment Analysis in the Natural Language API. map metadata = 2; } -// Request message for -// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. +// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. message BatchPredictRequest { - // Name of the model requested to serve the batch prediction. - string name = 1; + // Required. Name of the model requested to serve the batch prediction. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. The input configuration for batch prediction. - BatchPredictInputConfig input_config = 3; + BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The Configuration specifying where output predictions should // be written. - BatchPredictOutputConfig output_config = 4; + BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED]; // Additional domain-specific parameters for the predictions, any string must // be up to 25000 characters long. // - // * For Text Classification: + // AutoML Natural Language Classification + // + // `score_threshold` + // : (float) A value from 0.0 to 1.0. When the model + // makes predictions for a text snippet, it will only produce results + // that have at least this confidence score. The default is 0.5. + // + // + // AutoML Vision Classification + // + // `score_threshold` + // : (float) A value from 0.0 to 1.0. When the model + // makes predictions for an image, it will only produce results that + // have at least this confidence score. The default is 0.5. + // + // AutoML Vision Object Detection + // + // `score_threshold` + // : (float) When Model detects objects on the image, + // it will only produce bounding boxes which have at least this + // confidence score. Value in 0 to 1 range, default is 0.5. // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for a text snippet, it will only produce results - // that have at least this confidence score. The default is 0.5. + // `max_bounding_box_count` + // : (int64) The maximum number of bounding + // boxes returned per image. The default is 100, the + // number of bounding boxes returned might be limited by the server. + // AutoML Video Intelligence Classification // - // * For Image Classification: + // `score_threshold` + // : (float) A value from 0.0 to 1.0. When the model + // makes predictions for a video, it will only produce results that + // have at least this confidence score. The default is 0.5. // - // `score_threshold` - (float) A value from 0.0 to 1.0. When the model - // makes predictions for an image, it will only produce results that - // have at least this confidence score. The default is 0.5. + // `segment_classification` + // : (boolean) Set to true to request + // segment-level classification. 
AutoML Video Intelligence returns + // labels and their confidence scores for the entire segment of the + // video that user specified in the request configuration. + // The default is true. // - // * For Image Object Detection: + // `shot_classification` + // : (boolean) Set to true to request shot-level + // classification. AutoML Video Intelligence determines the boundaries + // for each camera shot in the entire segment of the video that user + // specified in the request configuration. AutoML Video Intelligence + // then returns labels and their confidence scores for each detected + // shot, along with the start and end time of the shot. + // The default is false. + // + // WARNING: Model evaluation is not done for this classification type, + // the quality of it depends on training data, but there are no metrics + // provided to describe that quality. + // + // `1s_interval_classification` + // : (boolean) Set to true to request + // classification for a video at one-second intervals. AutoML Video + // Intelligence returns labels and their confidence scores for each + // second of the entire segment of the video that user specified in the + // request configuration. The default is false. + // + // WARNING: Model evaluation is not done for this classification + // type, the quality of it depends on training data, but there are no + // metrics provided to describe that quality. + // + // AutoML Video Intelligence Object Tracking + // + // `score_threshold` + // : (float) When Model detects objects on video frames, + // it will only produce bounding boxes which have at least this + // confidence score. Value in 0 to 1 range, default is 0.5. + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding + // boxes returned per image. The default is 100, the + // number of bounding boxes returned might be limited by the server. + // + // `min_bounding_box_size` + // : (float) Only bounding boxes with shortest edge + // at least that long as a relative value of video frame size are + // returned. Value in 0 to 1 range. Default is 0. // - // `score_threshold` - (float) When Model detects objects on the image, - // it will only produce bounding boxes which have at least this - // confidence score. Value in 0 to 1 range, default is 0.5. - // `max_bounding_box_count` - (int64) No more than this number of bounding - // boxes will be produced per image. Default is 100, the - // requested value may be limited by server. map params = 5; } // Result of the Batch Predict. This message is returned in // [response][google.longrunning.Operation.response] of the operation returned -// by the -// [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. +// by the [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. message BatchPredictResult { // Additional domain-specific prediction response metadata. // - // * For Image Object Detection: - // `max_bounding_box_count` - (int64) At most that many bounding boxes per - // image could have been returned. + // AutoML Vision Object Detection + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding boxes returned per image. + // + // AutoML Video Intelligence Object Tracking + // + // `max_bounding_box_count` + // : (int64) The maximum number of bounding boxes returned per frame. 
map metadata = 1; } diff --git a/google/cloud/automl_v1/proto/prediction_service_pb2.py b/google/cloud/automl_v1/proto/prediction_service_pb2.py index 7e41c82a..0f6cfe79 100644 --- a/google/cloud/automl_v1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1/proto/prediction_service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1/proto/prediction_service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -17,6 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1.proto import ( annotation_payload_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__payload__pb2, @@ -39,15 +37,13 @@ name="google/cloud/automl_v1/proto/prediction_service.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1B\026PredictionServiceProtoP\001Z The dataset has // translation_dataset_metadata. @@ -275,98 +342,121 @@ message ListDatasetsRequest { // A token identifying a page of results for the server to return // Typically obtained via - // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] - // of the previous + // [ListDatasetsResponse.next_page_token][google.cloud.automl.v1.ListDatasetsResponse.next_page_token] of the previous // [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets] call. string page_token = 6; } -// Response message for -// [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. +// Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. message ListDatasetsResponse { // The datasets read. repeated Dataset datasets = 1; // A token to retrieve next page of results. - // Pass to - // [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] - // to obtain that page. + // Pass to [ListDatasetsRequest.page_token][google.cloud.automl.v1.ListDatasetsRequest.page_token] to obtain that page. string next_page_token = 2; } -// Request message for -// [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] +// Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] message UpdateDatasetRequest { - // The dataset which replaces the resource on the server. - Dataset dataset = 1; + // Required. The dataset which replaces the resource on the server. + Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. +// Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. message DeleteDatasetRequest { - // The resource name of the dataset to delete. - string name = 1; + // Required. The resource name of the dataset to delete. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; } -// Request message for -// [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. +// Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. message ImportDataRequest { // Required. Dataset name. Dataset must already exist. All imported // annotations and examples will be added. - string name = 1; + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; // Required. The desired input location and its domain specific semantics, // if any. - InputConfig input_config = 3; + InputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. +// Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. message ExportDataRequest { // Required. The resource name of the dataset. - string name = 1; + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; // Required. The desired output location. - OutputConfig output_config = 3; + OutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. +// Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. message GetAnnotationSpecRequest { - // The resource name of the annotation spec to retrieve. - string name = 1; + // Required. The resource name of the annotation spec to retrieve. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/AnnotationSpec" + } + ]; } -// Request message for -// [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. +// Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. message CreateModelRequest { - // Resource name of the parent project where the model is being created. - string parent = 1; - - // The model to create. - Model model = 4; + // Required. Resource name of the parent project where the model is being created. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // Required. The model to create. + Model model = 4 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. +// Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. message GetModelRequest { - // Resource name of the model. - string name = 1; + // Required. Resource name of the model. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. +// Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. message ListModelsRequest { - // Resource name of the project, from which to list the models. - string parent = 1; + // Required. Resource name of the project, from which to list the models. 
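
ImportData and ExportData both take a required dataset name plus a config message, and both run as long-running operations. The following is only a sketch under the same assumptions as above (v1 AutoMlClient, placeholder project, dataset, and bucket names); the dict literals stand in for the InputConfig/OutputConfig protos, which the generated client also accepts.

    >>> from google.cloud import automl_v1
    >>>
    >>> client = automl_v1.AutoMlClient()
    >>> dataset_name = client.dataset_path("my-project", "us-central1", "DATASET_ID")
    >>>
    >>> # Import rows from Cloud Storage into an existing dataset.
    >>> input_config = {"gcs_source": {"input_uris": ["gs://my-bucket/train.csv"]}}
    >>> client.import_data(dataset_name, input_config).result()
    >>>
    >>> # Export the dataset contents back to Cloud Storage.
    >>> output_config = {"gcs_destination": {"output_uri_prefix": "gs://my-bucket/export/"}}
    >>> client.export_data(dataset_name, output_config).result()
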
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; // An expression for filtering the results of the request. // // * `model_metadata` - for existence of the case (e.g. - // image_classification_model_metadata:*). + // video_classification_model_metadata:*). // * `dataset_id` - for = or !=. Some examples of using the filter are: // // * `image_classification_model_metadata:*` --> The model has @@ -379,94 +469,112 @@ message ListModelsRequest { // A token identifying a page of results for the server to return // Typically obtained via - // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] - // of the previous + // [ListModelsResponse.next_page_token][google.cloud.automl.v1.ListModelsResponse.next_page_token] of the previous // [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call. string page_token = 6; } -// Response message for -// [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. +// Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. message ListModelsResponse { // List of models in the requested page. repeated Model model = 1; // A token to retrieve next page of results. - // Pass to - // [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] - // to obtain that page. + // Pass to [ListModelsRequest.page_token][google.cloud.automl.v1.ListModelsRequest.page_token] to obtain that page. string next_page_token = 2; } -// Request message for -// [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. +// Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. message DeleteModelRequest { - // Resource name of the model being deleted. - string name = 1; + // Required. Resource name of the model being deleted. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] +// Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] message UpdateModelRequest { - // The model which replaces the resource on the server. - Model model = 1; + // Required. The model which replaces the resource on the server. + Model model = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The update mask applies to the resource. - google.protobuf.FieldMask update_mask = 2; + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. +// Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. message DeployModelRequest { // The per-domain specific deployment parameters. oneof model_deployment_metadata { // Model deployment metadata specific to Image Object Detection. - ImageObjectDetectionModelDeploymentMetadata - image_object_detection_model_deployment_metadata = 2; + ImageObjectDetectionModelDeploymentMetadata image_object_detection_model_deployment_metadata = 2; // Model deployment metadata specific to Image Classification. - ImageClassificationModelDeploymentMetadata - image_classification_model_deployment_metadata = 4; + ImageClassificationModelDeploymentMetadata image_classification_model_deployment_metadata = 4; } - // Resource name of the model to deploy. 
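
The model filter uses the same existence and equality syntax as the dataset filter, and DeleteModel is again a long-running operation. A rough illustration, assuming the v1 AutoMlClient, the same `filter_` keyword as in the dataset listing call, and placeholder identifiers:

    >>> from google.cloud import automl_v1
    >>>
    >>> client = automl_v1.AutoMlClient()
    >>> parent = client.location_path("my-project", "us-central1")
    >>>
    >>> # Models that carry image classification metadata, per the filter examples above.
    >>> for model in client.list_models(parent, filter_="image_classification_model_metadata:*"):
    ...     print(model.name, model.display_name)
    >>>
    >>> # Remove a model by its full resource name.
    >>> model_name = client.model_path("my-project", "us-central1", "MODEL_ID")
    >>> client.delete_model(model_name).result()
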
- string name = 1; + // Required. Resource name of the model to deploy. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. +// Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. message UndeployModelRequest { - // Resource name of the model to undeploy. - string name = 1; + // Required. Resource name of the model to undeploy. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; } -// Request message for -// [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need -// to be enabled for exporting, otherwise an error code will be returned. +// Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. +// Models need to be enabled for exporting, otherwise an error code will be +// returned. message ExportModelRequest { // Required. The resource name of the model to export. - string name = 1; + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. The desired output location and configuration. - ModelExportOutputConfig output_config = 3; + ModelExportOutputConfig output_config = 3 [(google.api.field_behavior) = REQUIRED]; } -// Request message for -// [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. +// Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. message GetModelEvaluationRequest { - // Resource name for the model evaluation. - string name = 1; + // Required. Resource name for the model evaluation. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/ModelEvaluation" + } + ]; } -// Request message for -// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. +// Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. message ListModelEvaluationsRequest { - // Resource name of the model to list the model evaluations for. + // Required. Resource name of the model to list the model evaluations for. // If modelId is set as "-", this will list model evaluations from across all // models of the parent location. - string parent = 1; - - // An expression for filtering the results of the request. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; + + // Required. An expression for filtering the results of the request. // // * `annotation_spec_id` - for =, != or existence. See example below for // the last. @@ -477,31 +585,25 @@ message ListModelEvaluationsRequest { // annotation spec with ID different than 4. // * `NOT annotation_spec_id:*` --> The model evaluation was done for // aggregate of all annotation specs. - string filter = 3; + string filter = 3 [(google.api.field_behavior) = REQUIRED]; // Requested page size. int32 page_size = 4; // A token identifying a page of results for the server to return. 
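
Because the evaluation filter is now annotated REQUIRED, callers have to pass an expression even when they only want the aggregate evaluation. A hedged sketch, again assuming the v1 AutoMlClient, placeholder IDs, and that the generated method takes the filter as its second argument:

    >>> from google.cloud import automl_v1
    >>>
    >>> client = automl_v1.AutoMlClient()
    >>> model_name = client.model_path("my-project", "us-central1", "MODEL_ID")
    >>>
    >>> # Only the evaluation computed over all annotation specs, per the syntax above.
    >>> for evaluation in client.list_model_evaluations(model_name, "NOT annotation_spec_id:*"):
    ...     print(evaluation.name)
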
// Typically obtained via - // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] - // of the previous - // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - // call. + // [ListModelEvaluationsResponse.next_page_token][google.cloud.automl.v1.ListModelEvaluationsResponse.next_page_token] of the previous + // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call. string page_token = 6; } -// Response message for -// [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. +// Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. message ListModelEvaluationsResponse { // List of model evaluations in the requested page. repeated ModelEvaluation model_evaluation = 1; // A token to retrieve next page of results. - // Pass to the - // [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] - // field of a new - // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] - // request to obtain that page. + // Pass to the [ListModelEvaluationsRequest.page_token][google.cloud.automl.v1.ListModelEvaluationsRequest.page_token] field of a new + // [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] request to obtain that page. string next_page_token = 2; } diff --git a/google/cloud/automl_v1/proto/service_pb2.py b/google/cloud/automl_v1/proto/service_pb2.py index c41a58f9..83647c90 100644 --- a/google/cloud/automl_v1/proto/service_pb2.py +++ b/google/cloud/automl_v1/proto/service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1/proto/service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -17,6 +14,7 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1.proto import ( annotation_payload_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__payload__pb2, @@ -52,15 +50,13 @@ name="google/cloud/automl_v1/proto/service.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1B\013AutoMlProtoP\001Z/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\x12\x93\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"9\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\x12\x86\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"2\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\x12\x97\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse"2\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\x12\x8c\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"2\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\x12\x99\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"?\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\x12\x96\x01\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\x12\x9c\x01\n\rUndeployModel\x12,.google.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\x12\x96\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"<\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\x12\xb7\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"E\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\x12\xc8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"E\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z\n\x0cinput_config\x18\x03 \x01(\x0b\x32#.google.cloud.automl.v1.InputConfigB\x03\xe0\x41\x02"\x8a\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12@\n\routput_config\x18\x03 
\x01(\x0b\x32$.google.cloud.automl.v1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"\x82\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"[\n\x12ListModelsResponse\x12,\n\x05model\x18\x01 \x03(\x0b\x32\x1d.google.cloud.automl.v1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"}\n\x12UpdateModelRequest\x12\x31\n\x05model\x18\x01 \x01(\x0b\x32\x1d.google.cloud.automl.v1.ModelB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"\xe3\x02\n\x12\x44\x65ployModelRequest\x12\x7f\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32\x43.google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12|\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32\x42.google.cloud.automl.v1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x94\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12K\n\routput_config\x18\x03 \x01(\x0b\x32/.google.cloud.automl.v1.ModelExportOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x8e\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x13\n\x06\x66ilter\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"z\n\x1cListModelEvaluationsResponse\x12\x41\n\x10model_evaluation\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xe8\x1b\n\x06\x41utoMl\x12\xcb\x01\n\rCreateDataset\x12,.google.cloud.automl.v1.CreateDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x37",/v1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\xca\x41\x1c\n\x07\x44\x61taset\x12\x11OperationMetadata\x12\x95\x01\n\nGetDataset\x12).google.cloud.automl.v1.GetDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset";\x82\xd3\xe4\x93\x02.\x12,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xa8\x01\n\x0cListDatasets\x12+.google.cloud.automl.v1.ListDatasetsRequest\x1a,.google.cloud.automl.v1.ListDatasetsResponse"=\x82\xd3\xe4\x93\x02.\x12,/v1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbb\x01\n\rUpdateDataset\x12,.google.cloud.automl.v1.UpdateDatasetRequest\x1a\x1f.google.cloud.automl.v1.Dataset"[\x82\xd3\xe4\x93\x02?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x13\x64\x61taset,update_mask\x12\xc6\x01\n\rDeleteDataset\x12,.google.cloud.automl.v1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"h\x82\xd3\xe4\x93\x02.*,/v1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdc\x01\n\nImportData\x12).google.cloud.automl.v1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\x01*\xda\x41\x11name,input_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xdd\x01\n\nExportData\x12).google.cloud.automl.v1.ExportDataRequest\x1a\x1d.google.longrunning.Operation"\x84\x01\x82\xd3\xe4\x93\x02<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbc\x01\n\x11GetAnnotationSpec\x12\x30.google.cloud.automl.v1.GetAnnotationSpecRequest\x1a&.google.cloud.automl.v1.AnnotationSpec"M\x82\xd3\xe4\x93\x02@\x12>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\xda\x41\x04name\x12\xbf\x01\n\x0b\x43reateModel\x12*.google.cloud.automl.v1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"e\x82\xd3\xe4\x93\x02\x33"*/v1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x8d\x01\n\x08GetModel\x12\'.google.cloud.automl.v1.GetModelRequest\x1a\x1d.google.cloud.automl.v1.Model"9\x82\xd3\xe4\x93\x02,\x12*/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xa0\x01\n\nListModels\x12).google.cloud.automl.v1.ListModelsRequest\x1a*.google.cloud.automl.v1.ListModelsResponse";\x82\xd3\xe4\x93\x02,\x12*/v1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xc0\x01\n\x0b\x44\x65leteModel\x12*.google.cloud.automl.v1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"f\x82\xd3\xe4\x93\x02,**/v1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xad\x01\n\x0bUpdateModel\x12*.google.cloud.automl.v1.UpdateModelRequest\x1a\x1d.google.cloud.automl.v1.Model"S\x82\xd3\xe4\x93\x02\x39\x32\x30/v1/{model.name=projects/*/locations/*/models/*}:\x05model\xda\x41\x11model,update_mask\x12\xca\x01\n\x0b\x44\x65ployModel\x12*.google.cloud.automl.v1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"p\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd0\x01\n\rUndeployModel\x12,.google
.cloud.automl.v1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"r\x82\xd3\xe4\x93\x02\x38"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd8\x01\n\x0b\x45xportModel\x12*.google.cloud.automl.v1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"~\x82\xd3\xe4\x93\x02\x36"1/v1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xbe\x01\n\x12GetModelEvaluation\x12\x31.google.cloud.automl.v1.GetModelEvaluationRequest\x1a\'.google.cloud.automl.v1.ModelEvaluation"L\x82\xd3\xe4\x93\x02?\x12=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xd8\x01\n\x14ListModelEvaluations\x12\x33.google.cloud.automl.v1.ListModelEvaluationsRequest\x1a\x34.google.cloud.automl.v1.ListModelEvaluationsResponse"U\x82\xd3\xe4\x93\x02?\x12=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\rparent,filter\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb7\x01\n\x1a\x63om.google.cloud.automl.v1B\x0b\x41utoMlProtoP\x01Z The dataset has - translation\_dataset\_metadata. + ``dataset_metadata`` - for existence of the case ( + e.g. ``image_classification_dataset_metadata``). Some examples + of using the filter are: - + ``translation_dataset_metadata:*`` –> The dataset has + translation_dataset_metadata. page_size: Requested page size. Server may return fewer results than requested. If unspecified, server will pick a default size. page_token: A token identifying a page of results for the server to return - Typically obtained via [ListDatasetsResponse.next\_page\_token - ][google.cloud.automl.v1.ListDatasetsResponse.next\_page\_toke - n] of the previous [AutoMl.ListDatasets][google.cloud.automl.v - 1.AutoMl.ListDatasets] call. + Typically obtained via [ListDatasetsResponse.next_page_token][ + google.cloud.automl.v1.ListDatasetsResponse.next_page_token] + of the previous [AutoMl.ListDatasets][google.cloud.automl.v1.A + utoMl.ListDatasets] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListDatasetsRequest) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsRequest) ListDatasetsResponse = _reflection.GeneratedProtocolMessageType( "ListDatasetsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTDATASETSRESPONSE, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTDATASETSRESPONSE, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - Attributes: datasets: The datasets read. next_page_token: A token to retrieve next page of results. Pass to [ListDataset - sRequest.page\_token][google.cloud.automl.v1.ListDatasetsReque - st.page\_token] to obtain that page. + sRequest.page_token][google.cloud.automl.v1.ListDatasetsReques + t.page_token] to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListDatasetsResponse) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsResponse) UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType( "UpdateDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDATASETREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATEDATASETREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - Attributes: dataset: - The dataset which replaces the resource on the server. + Required. The dataset which replaces the resource on the + server. update_mask: Required. The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UpdateDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateDatasetRequest) DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType( "DeleteDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEDATASETREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEDATASETREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - Attributes: name: - The resource name of the dataset to delete. + Required. The resource name of the dataset to delete. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteDatasetRequest) ImportDataRequest = _reflection.GeneratedProtocolMessageType( "ImportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDATAREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _IMPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - Attributes: name: Required. Dataset name. Dataset must already exist. All @@ -1530,20 +1586,19 @@ semantics, if any. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ImportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ImportDataRequest) ExportDataRequest = _reflection.GeneratedProtocolMessageType( "ExportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDATAREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - Attributes: name: Required. The resource name of the dataset. @@ -1551,179 +1606,173 @@ Required. The desired output location. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ExportDataRequest) GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType( "GetAnnotationSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETANNOTATIONSPECREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. 
- + { + "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1. + AutoMl.GetAnnotationSpec]. Attributes: name: - The resource name of the annotation spec to retrieve. + Required. The resource name of the annotation spec to + retrieve. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetAnnotationSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetAnnotationSpecRequest) CreateModelRequest = _reflection.GeneratedProtocolMessageType( "CreateModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _CREATEMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - Attributes: parent: - Resource name of the parent project where the model is being - created. + Required. Resource name of the parent project where the model + is being created. model: - The model to create. + Required. The model to create. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.CreateModelRequest) - ), + }, ) _sym_db.RegisterMessage(CreateModelRequest) GetModelRequest = _reflection.GeneratedProtocolMessageType( "GetModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - Attributes: name: - Resource name of the model. + Required. Resource name of the model. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetModelRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelRequest) ListModelsRequest = _reflection.GeneratedProtocolMessageType( "ListModelsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTMODELSREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - Attributes: parent: - Resource name of the project, from which to list the models. + Required. Resource name of the project, from which to list the + models. filter: An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case (e.g. - image\_classification\_model\_metadata:\*). - ``dataset_id`` + ``model_metadata`` - for existence of the case ( + e.g. ``video_classification_model_metadata:*``). - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. - ``dataset_id=5`` - --> The model was created from a dataset with ID 5. + ``image_classification_model_metadata:*`` –> The model has + image_classification_model_metadata. - ``dataset_id=5`` –> + The model was created from a dataset with ID 5. page_size: Requested page size. 
page_token: A token identifying a page of results for the server to return - Typically obtained via [ListModelsResponse.next\_page\_token][ - google.cloud.automl.v1.ListModelsResponse.next\_page\_token] - of the previous + Typically obtained via [ListModelsResponse.next_page_token][go + ogle.cloud.automl.v1.ListModelsResponse.next_page_token] of + the previous [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelsRequest) ListModelsResponse = _reflection.GeneratedProtocolMessageType( "ListModelsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSRESPONSE, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTMODELSRESPONSE, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - Attributes: model: List of models in the requested page. next_page_token: A token to retrieve next page of results. Pass to [ListModelsR - equest.page\_token][google.cloud.automl.v1.ListModelsRequest.p - age\_token] to obtain that page. + equest.page_token][google.cloud.automl.v1.ListModelsRequest.pa + ge_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelsResponse) DeleteModelRequest = _reflection.GeneratedProtocolMessageType( "DeleteModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - Attributes: name: - Resource name of the model being deleted. + Required. Resource name of the model being deleted. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeleteModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteModelRequest) UpdateModelRequest = _reflection.GeneratedProtocolMessageType( "UpdateModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UPDATEMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - Attributes: model: - The model which replaces the resource on the server. + Required. The model which replaces the resource on the server. update_mask: Required. The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UpdateModelRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateModelRequest) DeployModelRequest = _reflection.GeneratedProtocolMessageType( "DeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - Attributes: model_deployment_metadata: The per-domain specific deployment parameters. 
@@ -1732,43 +1781,41 @@ image_classification_model_deployment_metadata: Model deployment metadata specific to Image Classification. name: - Resource name of the model to deploy. + Required. Resource name of the model to deploy. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.DeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeployModelRequest) UndeployModelRequest = _reflection.GeneratedProtocolMessageType( "UndeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_UNDEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _UNDEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - Attributes: name: - Resource name of the model to undeploy. + Required. Resource name of the model to undeploy. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.UndeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(UndeployModelRequest) ExportModelRequest = _reflection.GeneratedProtocolMessageType( "ExportModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTMODELREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models - need to be enabled for exporting, otherwise an error code will be - returned. - + { + "DESCRIPTOR": _EXPORTMODELREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for + [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. + Models need to be enabled for exporting, otherwise an error code will + be returned. Attributes: name: @@ -1777,105 +1824,128 @@ Required. The desired output location and configuration. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ExportModelRequest) - ), + }, ) _sym_db.RegisterMessage(ExportModelRequest) GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType( "GetModelEvaluationRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELEVALUATIONREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - + { + "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 + .AutoMl.GetModelEvaluation]. Attributes: name: - Resource name for the model evaluation. + Required. Resource name for the model evaluation. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.GetModelEvaluationRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelEvaluationRequest) ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSREQUEST, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - + { + "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. + v1.AutoMl.ListModelEvaluations]. Attributes: parent: - Resource name of the model to list the model evaluations for. 
- If modelId is set as "-", this will list model evaluations - from across all models of the parent location. + Required. Resource name of the model to list the model + evaluations for. If modelId is set as “-”, this will list + model evaluations from across all models of the parent + location. filter: - An expression for filtering the results of the request. - - ``annotation_spec_id`` - for =, != or existence. See example - below for the last. Some examples of using the filter are: - - ``annotation_spec_id!=4`` --> The model evaluation was done - for annotation spec with ID different than 4. - ``NOT - annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. + Required. An expression for filtering the results of the + request. - ``annotation_spec_id`` - for =, != or existence. + See example below for the last. Some examples of using the + filter are: - ``annotation_spec_id!=4`` –> The model + evaluation was done for annotation spec with ID different + than 4. - ``NOT annotation_spec_id:*`` –> The model + evaluation was done for aggregate of all annotation specs. page_size: Requested page size. page_token: A token identifying a page of results for the server to return. Typically obtained via [ListModelEvaluationsResponse.n - ext\_page\_token][google.cloud.automl.v1.ListModelEvaluationsR - esponse.next\_page\_token] of the previous [AutoMl.ListModelEv - aluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations] + ext_page_token][google.cloud.automl.v1.ListModelEvaluationsRes + ponse.next_page_token] of the previous [AutoMl.ListModelEvalua + tions][google.cloud.automl.v1.AutoMl.ListModelEvaluations] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelEvaluationsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsRequest) ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSRESPONSE, - __module__="google.cloud.automl_v1.proto.service_pb2", - __doc__="""Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - + { + "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, + "__module__": "google.cloud.automl_v1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl + .v1.AutoMl.ListModelEvaluations]. Attributes: model_evaluation: List of model evaluations in the requested page. next_page_token: A token to retrieve next page of results. Pass to the [ListMod - elEvaluationsRequest.page\_token][google.cloud.automl.v1.ListM - odelEvaluationsRequest.page\_token] field of a new [AutoMl.Lis - tModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEval - uations] request to obtain that page. + elEvaluationsRequest.page_token][google.cloud.automl.v1.ListMo + delEvaluationsRequest.page_token] field of a new [AutoMl.ListM + odelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvalua + tions] request to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1.ListModelEvaluationsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsResponse) DESCRIPTOR._options = None +_CREATEDATASETREQUEST.fields_by_name["parent"]._options = None +_CREATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_GETDATASETREQUEST.fields_by_name["name"]._options = None +_LISTDATASETSREQUEST.fields_by_name["parent"]._options = None +_UPDATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_UPDATEDATASETREQUEST.fields_by_name["update_mask"]._options = None +_DELETEDATASETREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["input_config"]._options = None +_EXPORTDATAREQUEST.fields_by_name["name"]._options = None +_EXPORTDATAREQUEST.fields_by_name["output_config"]._options = None +_GETANNOTATIONSPECREQUEST.fields_by_name["name"]._options = None +_CREATEMODELREQUEST.fields_by_name["parent"]._options = None +_CREATEMODELREQUEST.fields_by_name["model"]._options = None +_GETMODELREQUEST.fields_by_name["name"]._options = None +_LISTMODELSREQUEST.fields_by_name["parent"]._options = None +_DELETEMODELREQUEST.fields_by_name["name"]._options = None +_UPDATEMODELREQUEST.fields_by_name["model"]._options = None +_UPDATEMODELREQUEST.fields_by_name["update_mask"]._options = None +_DEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_UNDEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["output_config"]._options = None +_GETMODELEVALUATIONREQUEST.fields_by_name["name"]._options = None +_LISTMODELEVALUATIONSREQUEST.fields_by_name["parent"]._options = None +_LISTMODELEVALUATIONSREQUEST.fields_by_name["filter"]._options = None _AUTOML = _descriptor.ServiceDescriptor( name="AutoMl", full_name="google.cloud.automl.v1.AutoMl", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), - serialized_start=2501, - serialized_end=5458, + serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, + serialized_start=3236, + serialized_end=6796, methods=[ _descriptor.MethodDescriptor( name="CreateDataset", @@ -1884,9 +1954,8 @@ containing_service=None, input_type=_CREATEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0027",/v1/{parent=projects/*/locations/*}/datasets:\007dataset' - ), + serialized_options=b'\202\323\344\223\0027",/v1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset\312A\034\n\007Dataset\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetDataset", @@ -1895,9 +1964,8 @@ containing_service=None, input_type=_GETDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\002.\022,/v1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\002.\022,/v1/{name=projects/*/locations/*/datasets/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListDatasets", @@ -1906,9 +1974,8 @@ containing_service=None, input_type=_LISTDATASETSREQUEST, output_type=_LISTDATASETSRESPONSE, - serialized_options=_b( - 
"\202\323\344\223\002.\022,/v1/{parent=projects/*/locations/*}/datasets" - ), + serialized_options=b"\202\323\344\223\002.\022,/v1/{parent=projects/*/locations/*}/datasets\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateDataset", @@ -1917,9 +1984,8 @@ containing_service=None, input_type=_UPDATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\002?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset" - ), + serialized_options=b"\202\323\344\223\002?24/v1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\023dataset,update_mask", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteDataset", @@ -1928,9 +1994,8 @@ containing_service=None, input_type=_DELETEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002.*,/v1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\002.*,/v1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ImportData", @@ -1939,9 +2004,8 @@ containing_service=None, input_type=_IMPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\001*' - ), + serialized_options=b'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:importData:\001*\332A\021name,input_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportData", @@ -1950,9 +2014,8 @@ containing_service=None, input_type=_EXPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\001*' - ), + serialized_options=b'\202\323\344\223\002<"7/v1/{name=projects/*/locations/*/datasets/*}:exportData:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetAnnotationSpec", @@ -1961,9 +2024,8 @@ containing_service=None, input_type=_GETANNOTATIONSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2._ANNOTATIONSPEC, - serialized_options=_b( - "\202\323\344\223\002@\022>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}" - ), + serialized_options=b"\202\323\344\223\002@\022>/v1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateModel", @@ -1972,9 +2034,8 @@ containing_service=None, input_type=_CREATEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0023"*/v1/{parent=projects/*/locations/*}/models:\005model' - ), + serialized_options=b'\202\323\344\223\0023"*/v1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModel", @@ -1983,9 +2044,8 @@ containing_service=None, 
input_type=_GETMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, - serialized_options=_b( - "\202\323\344\223\002,\022*/v1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\002,\022*/v1/{name=projects/*/locations/*/models/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModels", @@ -1994,9 +2054,8 @@ containing_service=None, input_type=_LISTMODELSREQUEST, output_type=_LISTMODELSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002,\022*/v1/{parent=projects/*/locations/*}/models" - ), + serialized_options=b"\202\323\344\223\002,\022*/v1/{parent=projects/*/locations/*}/models\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteModel", @@ -2005,9 +2064,8 @@ containing_service=None, input_type=_DELETEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\002,**/v1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\002,**/v1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateModel", @@ -2016,9 +2074,8 @@ containing_service=None, input_type=_UPDATEMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2._MODEL, - serialized_options=_b( - "\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model" - ), + serialized_options=b"\202\323\344\223\002920/v1/{model.name=projects/*/locations/*/models/*}:\005model\332A\021model,update_mask", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeployModel", @@ -2027,9 +2084,8 @@ containing_service=None, input_type=_DEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*' - ), + serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UndeployModel", @@ -2038,9 +2094,8 @@ containing_service=None, input_type=_UNDEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*' - ), + serialized_options=b'\202\323\344\223\0028"3/v1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportModel", @@ -2049,9 +2104,8 @@ containing_service=None, input_type=_EXPORTMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*' - ), + serialized_options=b'\202\323\344\223\0026"1/v1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModelEvaluation", @@ -2060,9 +2114,8 @@ containing_service=None, 
input_type=_GETMODELEVALUATIONREQUEST, output_type=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, - serialized_options=_b( - "\202\323\344\223\002?\022=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" - ), + serialized_options=b"\202\323\344\223\002?\022=/v1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModelEvaluations", @@ -2071,9 +2124,8 @@ containing_service=None, input_type=_LISTMODELEVALUATIONSREQUEST, output_type=_LISTMODELEVALUATIONSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002?\022=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations" - ), + serialized_options=b"\202\323\344\223\002?\022=/v1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\rparent,filter", + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/google/cloud/automl_v1/proto/service_pb2_grpc.py b/google/cloud/automl_v1/proto/service_pb2_grpc.py index 0ad90914..2f9a2837 100644 --- a/google/cloud/automl_v1/proto/service_pb2_grpc.py +++ b/google/cloud/automl_v1/proto/service_pb2_grpc.py @@ -194,6 +194,14 @@ def DeleteDataset(self, request, context): def ImportData(self, request, context): """Imports data into a dataset. + For Tables this method can only be called on an empty Dataset. + + For Tables: + * A + [schema_inference_version][google.cloud.automl.v1.InputConfig.params] + parameter must be explicitly set. + Returns an empty response in the + [response][google.longrunning.Operation.response] field when it completes. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -263,11 +271,11 @@ def DeployModel(self, request, context): same parameters has no effect. Deploying with different parametrs (as e.g. changing - [node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number]) + [node_number][google.cloud.automl.v1p1beta.ImageObjectDetectionModelDeploymentMetadata.node_number]) will reset the deployment state without pausing the model's availability. - Only applicable for Text Classification, Image Object Detection; all other - domains manage deployment automatically. + Only applicable for Text Classification, Image Object Detection , Tables, and Image Segmentation; all other domains manage + deployment automatically. Returns an empty response in the [response][google.longrunning.Operation.response] field when it completes. @@ -279,7 +287,7 @@ def DeployModel(self, request, context): def UndeployModel(self, request, context): """Undeploys a model. If the model is not deployed this method has no effect. - Only applicable for Text Classification, Image Object Detection; + Only applicable for Text Classification, Image Object Detection and Tables; all other domains manage deployment automatically. Returns an empty response in the diff --git a/google/cloud/automl_v1/proto/text.proto b/google/cloud/automl_v1/proto/text.proto index bffe9634..667031b8 100644 --- a/google/cloud/automl_v1/proto/text.proto +++ b/google/cloud/automl_v1/proto/text.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1/classification.proto"; +import "google/api/annotations.proto"; option csharp_namespace = "Google.Cloud.AutoML.V1"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1;automl"; @@ -41,20 +40,27 @@ message TextClassificationModelMetadata { } // Dataset metadata that is specific to text extraction -message TextExtractionDatasetMetadata {} +message TextExtractionDatasetMetadata { + +} // Model metadata that is specific to text extraction. -message TextExtractionModelMetadata {} +message TextExtractionModelMetadata { + +} // Dataset metadata for text sentiment. message TextSentimentDatasetMetadata { - // Required. A sentiment is expressed as an integer ordinal, where higher - // value means a more positive sentiment. The range of sentiments that will be - // used is between 0 and sentiment_max (inclusive on both ends), and all the - // values in the range must be represented in the dataset before a model can - // be created. sentiment_max value must be between 1 and 10 (inclusive). + // Required. A sentiment is expressed as an integer ordinal, where higher value + // means a more positive sentiment. The range of sentiments that will be used + // is between 0 and sentiment_max (inclusive on both ends), and all the values + // in the range must be represented in the dataset before a model can be + // created. + // sentiment_max value must be between 1 and 10 (inclusive). int32 sentiment_max = 1; } // Model metadata that is specific to text sentiment. -message TextSentimentModelMetadata {} +message TextSentimentModelMetadata { + +} diff --git a/google/cloud/automl_v1/proto/text_extraction.proto b/google/cloud/automl_v1/proto/text_extraction.proto index 02119f5c..37a31e71 100644 --- a/google/cloud/automl_v1/proto/text_extraction.proto +++ b/google/cloud/automl_v1/proto/text_extraction.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1/proto/text_extraction_pb2.py b/google/cloud/automl_v1/proto/text_extraction_pb2.py index c1106e25..951204b3 100644 --- a/google/cloud/automl_v1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1/proto/text_extraction_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1/proto/text_extraction.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,9 @@ name="google/cloud/automl_v1/proto/text_extraction.proto", package="google.cloud.automl.v1", syntax="proto3", - serialized_options=_b( - "\n\032com.google.cloud.automl.v1P\001Z>> response = client.create_dataset(parent, dataset) Args: - parent (str): The resource name of the project to create the dataset for. 
- dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset to create. + parent (str): Required. The resource name of the project to create the dataset for. + dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset to create. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.Dataset` @@ -347,7 +350,7 @@ def create_dataset( client_info=self._client_info, ) - request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset) + request = service_pb2.CreateDatasetRequest(parent=parent, dataset=dataset,) if metadata is None: metadata = [] metadata = list(metadata) @@ -365,86 +368,6 @@ def create_dataset( request, retry=retry, timeout=timeout, metadata=metadata ) - def update_dataset( - self, - dataset, - update_mask=None, - retry=google.api_core.gapic_v1.method.DEFAULT, - timeout=google.api_core.gapic_v1.method.DEFAULT, - metadata=None, - ): - """ - Updates a dataset. - - Example: - >>> from google.cloud import automl_v1beta1 - >>> - >>> client = automl_v1beta1.AutoMlClient() - >>> - >>> # TODO: Initialize `dataset`: - >>> dataset = {} - >>> - >>> response = client.update_dataset(dataset) - - Args: - dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): The dataset which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Dataset` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - retry (Optional[google.api_core.retry.Retry]): A retry object used - to retry requests. If ``None`` is specified, requests will - be retried using a default configuration. - timeout (Optional[float]): The amount of time, in seconds, to wait - for the request to complete. Note that if ``retry`` is - specified, the timeout applies to each individual attempt. - metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata - that is provided to the method. - - Returns: - A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. - - Raises: - google.api_core.exceptions.GoogleAPICallError: If the request - failed for any reason. - google.api_core.exceptions.RetryError: If the request failed due - to a retryable error and retry attempts failed. - ValueError: If the parameters are invalid. - """ - # Wrap the transport method to add retry and timeout logic. 
- if "update_dataset" not in self._inner_api_calls: - self._inner_api_calls[ - "update_dataset" - ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_dataset, - default_retry=self._method_configs["UpdateDataset"].retry, - default_timeout=self._method_configs["UpdateDataset"].timeout, - client_info=self._client_info, - ) - - request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask - ) - if metadata is None: - metadata = [] - metadata = list(metadata) - try: - routing_header = [("dataset.name", dataset.name)] - except AttributeError: - pass - else: - routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( - routing_header - ) - metadata.append(routing_metadata) - - return self._inner_api_calls["update_dataset"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - def get_dataset( self, name, @@ -465,7 +388,7 @@ def get_dataset( >>> response = client.get_dataset(name) Args: - name (str): The resource name of the dataset to retrieve. + name (str): Required. The resource name of the dataset to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -496,7 +419,7 @@ def get_dataset( client_info=self._client_info, ) - request = service_pb2.GetDatasetRequest(name=name) + request = service_pb2.GetDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -548,15 +471,15 @@ def list_datasets( ... pass Args: - parent (str): The resource name of the project from which to list datasets. + parent (str): Required. The resource name of the project from which to list datasets. filter_ (str): An expression for filtering the results of the request. - ``dataset_metadata`` - for existence of the case (e.g. - image\_classification\_dataset\_metadata:\*). Some examples of using - the filter are: + ``image_classification_dataset_metadata``). Some examples of using the + filter are: - ``translation_dataset_metadata:*`` --> The dataset has - translation\_dataset\_metadata. + translation_dataset_metadata. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -596,7 +519,7 @@ def list_datasets( ) request = service_pb2.ListDatasetsRequest( - parent=parent, filter=filter_, page_size=page_size + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -626,6 +549,86 @@ def list_datasets( ) return iterator + def update_dataset( + self, + dataset, + update_mask=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a dataset. + + Example: + >>> from google.cloud import automl_v1beta1 + >>> + >>> client = automl_v1beta1.AutoMlClient() + >>> + >>> # TODO: Initialize `dataset`: + >>> dataset = {} + >>> + >>> response = client.update_dataset(dataset) + + Args: + dataset (Union[dict, ~google.cloud.automl_v1beta1.types.Dataset]): Required. The dataset which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.Dataset` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. 
+ + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.automl_v1beta1.types.Dataset` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "update_dataset" not in self._inner_api_calls: + self._inner_api_calls[ + "update_dataset" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_dataset, + default_retry=self._method_configs["UpdateDataset"].retry, + default_timeout=self._method_configs["UpdateDataset"].timeout, + client_info=self._client_info, + ) + + request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask, + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("dataset.name", dataset.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_dataset"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + def delete_dataset( self, name, @@ -634,8 +637,8 @@ def delete_dataset( metadata=None, ): """ - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Example: @@ -657,7 +660,7 @@ def delete_dataset( >>> metadata = response.metadata() Args: - name (str): The resource name of the dataset to delete. + name (str): Required. The resource name of the dataset to delete. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -688,7 +691,7 @@ def delete_dataset( client_info=self._client_info, ) - request = service_pb2.DeleteDatasetRequest(name=name) + request = service_pb2.DeleteDatasetRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -721,8 +724,8 @@ def import_data( metadata=None, ): """ - Imports data into a dataset. For Tables this method can only be called - on an empty Dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. 
For Tables: @@ -789,7 +792,7 @@ def import_data( client_info=self._client_info, ) - request = service_pb2.ImportDataRequest(name=name, input_config=input_config) + request = service_pb2.ImportDataRequest(name=name, input_config=input_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -822,8 +825,8 @@ def export_data( metadata=None, ): """ - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 @@ -882,7 +885,7 @@ def export_data( client_info=self._client_info, ) - request = service_pb2.ExportDataRequest(name=name, output_config=output_config) + request = service_pb2.ExportDataRequest(name=name, output_config=output_config,) if metadata is None: metadata = [] metadata = list(metadata) @@ -906,47 +909,27 @@ def export_data( metadata_type=proto_operations_pb2.OperationMetadata, ) - def create_model( + def get_annotation_spec( self, - parent, - model, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Creates a model. Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. + Gets an annotation spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') - >>> - >>> # TODO: Initialize `model`: - >>> model = {} - >>> - >>> response = client.create_model(parent, model) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_annotation_spec(name) Args: - parent (str): Resource name of the parent project where the model is being created. - model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): The model to create. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.Model` + name (str): Required. The resource name of the annotation spec to retrieve. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -957,7 +940,7 @@ def create_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -967,22 +950,22 @@ def create_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "create_model" not in self._inner_api_calls: + if "get_annotation_spec" not in self._inner_api_calls: self._inner_api_calls[ - "create_model" + "get_annotation_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.create_model, - default_retry=self._method_configs["CreateModel"].retry, - default_timeout=self._method_configs["CreateModel"].timeout, + self.transport.get_annotation_spec, + default_retry=self._method_configs["GetAnnotationSpec"].retry, + default_timeout=self._method_configs["GetAnnotationSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.CreateModelRequest(parent=parent, model=model) + request = service_pb2.GetAnnotationSpecRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -991,37 +974,36 @@ def create_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["create_model"]( + return self._inner_api_calls["get_annotation_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - model_pb2.Model, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def get_model( + def get_table_spec( self, name, + field_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a model. + Gets a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') >>> - >>> response = client.get_model(name) + >>> response = client.get_table_spec(name) Args: - name (str): Resource name of the model. + name (str): Required. The resource name of the table spec to retrieve. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1032,7 +1014,7 @@ def get_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.Model` instance. + A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1042,17 +1024,17 @@ def get_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_model" not in self._inner_api_calls: + if "get_table_spec" not in self._inner_api_calls: self._inner_api_calls[ - "get_model" + "get_table_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model, - default_retry=self._method_configs["GetModel"].retry, - default_timeout=self._method_configs["GetModel"].timeout, + self.transport.get_table_spec, + default_retry=self._method_configs["GetTableSpec"].retry, + default_timeout=self._method_configs["GetTableSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.GetModelRequest(name=name) + request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1066,13 +1048,14 @@ def get_model( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_model"]( + return self._inner_api_calls["get_table_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def list_models( + def list_table_specs( self, parent, + field_mask=None, filter_=None, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -1080,17 +1063,17 @@ def list_models( metadata=None, ): """ - Lists models. + Lists table specs in a dataset. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.location_path('[PROJECT]', '[LOCATION]') + >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') >>> >>> # Iterate over all results - >>> for element in client.list_models(parent): + >>> for element in client.list_table_specs(parent): ... # process element ... pass >>> @@ -1098,25 +1081,19 @@ def list_models( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_models(parent).pages: + >>> for page in client.list_table_specs(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Resource name of the project, from which to list the models. - filter_ (str): An expression for filtering the results of the request. - - - ``model_metadata`` - for existence of the case (e.g. - video\_classification\_model\_metadata:\*). - - - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. + parent (str): Required. The resource name of the dataset to list table specs from. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - ``dataset_id=5`` --> The model was created from a dataset with ID 5. - page_size (int): The maximum number of resources contained in the + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + filter_ (str): Filter expression, see go/filtering. + page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number @@ -1132,7 +1109,7 @@ def list_models( Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. + An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. You can also iterate over the pages of the response using its `pages` property. 
@@ -1144,18 +1121,18 @@ def list_models( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_models" not in self._inner_api_calls: + if "list_table_specs" not in self._inner_api_calls: self._inner_api_calls[ - "list_models" + "list_table_specs" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_models, - default_retry=self._method_configs["ListModels"].retry, - default_timeout=self._method_configs["ListModels"].timeout, + self.transport.list_table_specs, + default_retry=self._method_configs["ListTableSpecs"].retry, + default_timeout=self._method_configs["ListTableSpecs"].timeout, client_info=self._client_info, ) - request = service_pb2.ListModelsRequest( - parent=parent, filter=filter_, page_size=page_size + request = service_pb2.ListTableSpecsRequest( + parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1173,50 +1150,48 @@ def list_models( iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls["list_models"], + self._inner_api_calls["list_table_specs"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, - items_field="model", + items_field="table_specs", request_token_field="page_token", response_token_field="next_page_token", ) return iterator - def delete_model( + def update_table_spec( self, - name, + table_spec, + update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a table spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.delete_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> # TODO: Initialize `table_spec`: + >>> table_spec = {} >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.update_table_spec(table_spec) Args: - name (str): Resource name of the model being deleted. + table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): Required. The table spec which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.TableSpec` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1227,7 +1202,7 @@ def delete_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1237,22 +1212,24 @@ def delete_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "delete_model" not in self._inner_api_calls: + if "update_table_spec" not in self._inner_api_calls: self._inner_api_calls[ - "delete_model" + "update_table_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.delete_model, - default_retry=self._method_configs["DeleteModel"].retry, - default_timeout=self._method_configs["DeleteModel"].timeout, + self.transport.update_table_spec, + default_retry=self._method_configs["UpdateTableSpec"].retry, + default_timeout=self._method_configs["UpdateTableSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.DeleteModelRequest(name=name) + request = service_pb2.UpdateTableSpecRequest( + table_spec=table_spec, update_mask=update_mask, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("table_spec.name", table_spec.name)] except AttributeError: pass else: @@ -1261,66 +1238,36 @@ def delete_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["delete_model"]( + return self._inner_api_calls["update_table_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def deploy_model( + def get_column_spec( self, name, - image_object_detection_model_deployment_metadata=None, - image_classification_model_deployment_metadata=None, + field_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. - - Returns an empty response in the ``response`` field when it completes. + Gets a column spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') - >>> - >>> response = client.deploy_model(name) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) + >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_column_spec(name) Args: - name (str): Resource name of the model to deploy. - image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` - image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. + name (str): Required. The resource name of the column spec to retrieve. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. 
If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1331,7 +1278,7 @@ def deploy_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1341,28 +1288,17 @@ def deploy_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "deploy_model" not in self._inner_api_calls: + if "get_column_spec" not in self._inner_api_calls: self._inner_api_calls[ - "deploy_model" + "get_column_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.deploy_model, - default_retry=self._method_configs["DeployModel"].retry, - default_timeout=self._method_configs["DeployModel"].timeout, + self.transport.get_column_spec, + default_retry=self._method_configs["GetColumnSpec"].retry, + default_timeout=self._method_configs["GetColumnSpec"].timeout, client_info=self._client_info, ) - # Sanity check: We have some fields which are mutually exclusive; - # raise ValueError if more than one is sent. - google.api_core.protobuf_helpers.check_oneof( - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) - - request = service_pb2.DeployModelRequest( - name=name, - image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, - image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, - ) + request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1376,52 +1312,56 @@ def deploy_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["deploy_model"]( + return self._inner_api_calls["get_column_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def undeploy_model( + def list_column_specs( self, - name, + parent, + field_mask=None, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. - - Returns an empty response in the ``response`` field when it completes. + Lists column specs in a table spec. 
Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') >>> - >>> response = client.undeploy_model(name) + >>> # Iterate over all results + >>> for element in client.list_column_specs(parent): + ... # process element + ... pass >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() >>> - >>> response.add_done_callback(callback) + >>> # Alternatively: >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> # Iterate over results one page at a time + >>> for page in client.list_column_specs(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - name (str): Resource name of the model to undeploy. + parent (str): Required. The resource name of the table spec to list column specs from. + field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + filter_ (str): Filter expression, see go/filtering. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1432,7 +1372,10 @@ def undeploy_model( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1442,22 +1385,24 @@ def undeploy_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "undeploy_model" not in self._inner_api_calls: + if "list_column_specs" not in self._inner_api_calls: self._inner_api_calls[ - "undeploy_model" + "list_column_specs" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.undeploy_model, - default_retry=self._method_configs["UndeployModel"].retry, - default_timeout=self._method_configs["UndeployModel"].timeout, + self.transport.list_column_specs, + default_retry=self._method_configs["ListColumnSpecs"].retry, + default_timeout=self._method_configs["ListColumnSpecs"].timeout, client_info=self._client_info, ) - request = service_pb2.UndeployModelRequest(name=name) + request = service_pb2.ListColumnSpecsRequest( + parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size, + ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -1466,37 +1411,51 @@ def undeploy_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["undeploy_model"]( - request, retry=retry, timeout=timeout, metadata=metadata - ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_column_specs"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="column_specs", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator - def get_model_evaluation( + def update_column_spec( self, - name, + column_spec, + update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a model evaluation. + Updates a column spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') + >>> # TODO: Initialize `column_spec`: + >>> column_spec = {} >>> - >>> response = client.get_model_evaluation(name) + >>> response = client.update_column_spec(column_spec) Args: - name (str): Resource name for the model evaluation. + column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): Required. The column spec which replaces the resource on the server. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` + update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1507,7 +1466,7 @@ def get_model_evaluation( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. + A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1517,22 +1476,24 @@ def get_model_evaluation( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "get_model_evaluation" not in self._inner_api_calls: + if "update_column_spec" not in self._inner_api_calls: self._inner_api_calls[ - "get_model_evaluation" + "update_column_spec" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_model_evaluation, - default_retry=self._method_configs["GetModelEvaluation"].retry, - default_timeout=self._method_configs["GetModelEvaluation"].timeout, + self.transport.update_column_spec, + default_retry=self._method_configs["UpdateColumnSpec"].retry, + default_timeout=self._method_configs["UpdateColumnSpec"].timeout, client_info=self._client_info, ) - request = service_pb2.GetModelEvaluationRequest(name=name) - if metadata is None: + request = service_pb2.UpdateColumnSpecRequest( + column_spec=column_spec, update_mask=update_mask, + ) + if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("column_spec.name", column_spec.name)] except AttributeError: pass else: @@ -1541,38 +1502,35 @@ def get_model_evaluation( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_model_evaluation"]( + return self._inner_api_calls["update_column_spec"]( request, retry=retry, timeout=timeout, metadata=metadata ) - def export_model( + def create_model( self, - name, - output_config, + parent, + model, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in - - ``ModelExportOutputConfig``. - - Returns an empty response in the ``response`` field when it completes. + Creates a model. Returns a Model in the ``response`` field when it + completes. When you create a model, several model evaluations are + created for it: a global evaluation, and one evaluation for each + annotation spec. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} + >>> # TODO: Initialize `model`: + >>> model = {} >>> - >>> response = client.export_model(name, output_config) + >>> response = client.create_model(parent, model) >>> >>> def callback(operation_future): ... # Handle result. @@ -1584,11 +1542,11 @@ def export_model( >>> metadata = response.metadata() Args: - name (str): Required. The resource name of the model to export. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. + parent (str): Required. Resource name of the parent project where the model is being created. + model (Union[dict, ~google.cloud.automl_v1beta1.types.Model]): Required. The model to create. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig` + message :class:`~google.cloud.automl_v1beta1.types.Model` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1609,22 +1567,22 @@ def export_model( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "export_model" not in self._inner_api_calls: + if "create_model" not in self._inner_api_calls: self._inner_api_calls[ - "export_model" + "create_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_model, - default_retry=self._method_configs["ExportModel"].retry, - default_timeout=self._method_configs["ExportModel"].timeout, + self.transport.create_model, + default_retry=self._method_configs["CreateModel"].retry, + default_timeout=self._method_configs["CreateModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportModelRequest(name=name, output_config=output_config) + request = service_pb2.CreateModelRequest(parent=parent, model=model,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("name", name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -1633,37 +1591,25 @@ def export_model( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_model"]( + operation = self._inner_api_calls["create_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) return google.api_core.operation.from_gapic( operation, self.transport._operations_client, - empty_pb2.Empty, + model_pb2.Model, metadata_type=proto_operations_pb2.OperationMetadata, ) - def export_evaluated_examples( + def get_model( self, name, - output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Exports examples on which the model was evaluated (i.e. which were in - the TEST set of the dataset the model was created from), together with - their ground truth annotations and the annotations created (predicted) - by the model. The examples, ground truth and predictions are exported in - the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. - - Currently only available for Tables. - - Returns an empty response in the ``response`` field when it completes. + Gets a model. Example: >>> from google.cloud import automl_v1beta1 @@ -1672,27 +1618,10 @@ def export_evaluated_examples( >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # TODO: Initialize `output_config`: - >>> output_config = {} - >>> - >>> response = client.export_evaluated_examples(name, output_config) - >>> - >>> def callback(operation_future): - ... # Handle result. - ... result = operation_future.result() - >>> - >>> response.add_done_callback(callback) - >>> - >>> # Handle metadata. - >>> metadata = response.metadata() + >>> response = client.get_model(name) Args: - name (str): Required. The resource name of the model whose evaluated examples are to - be exported. - output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` + name (str): Required. Resource name of the model. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1703,7 +1632,7 @@ def export_evaluated_examples( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. + A :class:`~google.cloud.automl_v1beta1.types.Model` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1713,19 +1642,17 @@ def export_evaluated_examples( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "export_evaluated_examples" not in self._inner_api_calls: + if "get_model" not in self._inner_api_calls: self._inner_api_calls[ - "export_evaluated_examples" + "get_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.export_evaluated_examples, - default_retry=self._method_configs["ExportEvaluatedExamples"].retry, - default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, + self.transport.get_model, + default_retry=self._method_configs["GetModel"].retry, + default_timeout=self._method_configs["GetModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config - ) + request = service_pb2.GetModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1739,17 +1666,11 @@ def export_evaluated_examples( ) metadata.append(routing_metadata) - operation = self._inner_api_calls["export_evaluated_examples"]( + return self._inner_api_calls["get_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) - return google.api_core.operation.from_gapic( - operation, - self.transport._operations_client, - empty_pb2.Empty, - metadata_type=proto_operations_pb2.OperationMetadata, - ) - def list_model_evaluations( + def list_models( self, parent, filter_=None, @@ -1759,17 +1680,17 @@ def list_model_evaluations( metadata=None, ): """ - Lists model evaluations. + Lists models. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') + >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results - >>> for element in client.list_model_evaluations(parent): + >>> for element in client.list_models(parent): ... # process element ... pass >>> @@ -1777,26 +1698,24 @@ def list_model_evaluations( >>> # Alternatively: >>> >>> # Iterate over results one page at a time - >>> for page in client.list_model_evaluations(parent).pages: + >>> for page in client.list_models(parent).pages: ... for element in page: ... # process element ... pass Args: - parent (str): Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations from across all - models of the parent location. + parent (str): Required. Resource name of the project, from which to list the models. filter_ (str): An expression for filtering the results of the request. - - ``annotation_spec_id`` - for =, != or existence. See example below - for the last. + - ``model_metadata`` - for existence of the case (e.g. + ``video_classification_model_metadata:*``). - Some examples of using the filter are: + - ``dataset_id`` - for = or !=. Some examples of using the filter are: - - ``annotation_spec_id!=4`` --> The model evaluation was done for - annotation spec with ID different than 4. - - ``NOT annotation_spec_id:*`` --> The model evaluation was done for - aggregate of all annotation specs. + - ``image_classification_model_metadata:*`` --> The model has + image_classification_model_metadata. + + - ``dataset_id=5`` --> The model was created from a dataset with ID 5. page_size (int): The maximum number of resources contained in the underlying API response. 
If page streaming is performed per- resource, this parameter does not affect the return value. If page @@ -1813,7 +1732,7 @@ def list_model_evaluations( Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. + An iterable of :class:`~google.cloud.automl_v1beta1.types.Model` instances. You can also iterate over the pages of the response using its `pages` property. @@ -1825,18 +1744,18 @@ def list_model_evaluations( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "list_model_evaluations" not in self._inner_api_calls: + if "list_models" not in self._inner_api_calls: self._inner_api_calls[ - "list_model_evaluations" + "list_models" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_model_evaluations, - default_retry=self._method_configs["ListModelEvaluations"].retry, - default_timeout=self._method_configs["ListModelEvaluations"].timeout, + self.transport.list_models, + default_retry=self._method_configs["ListModels"].retry, + default_timeout=self._method_configs["ListModels"].timeout, client_info=self._client_info, ) - request = service_pb2.ListModelEvaluationsRequest( - parent=parent, filter=filter_, page_size=page_size + request = service_pb2.ListModelsRequest( + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] @@ -1854,19 +1773,19 @@ def list_model_evaluations( iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( - self._inner_api_calls["list_model_evaluations"], + self._inner_api_calls["list_models"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, - items_field="model_evaluation", + items_field="model", request_token_field="page_token", response_token_field="next_page_token", ) return iterator - def get_annotation_spec( + def delete_model( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, @@ -1874,19 +1793,30 @@ def get_annotation_spec( metadata=None, ): """ - Gets an annotation spec. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.annotation_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[ANNOTATION_SPEC]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.get_annotation_spec(name) + >>> response = client.delete_model(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): The resource name of the annotation spec to retrieve. + name (str): Required. Resource name of the model being deleted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1897,7 +1827,7 @@ def get_annotation_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.AnnotationSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. 
Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1907,17 +1837,17 @@ def get_annotation_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_annotation_spec" not in self._inner_api_calls: + if "delete_model" not in self._inner_api_calls: self._inner_api_calls[ - "get_annotation_spec" + "delete_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_annotation_spec, - default_retry=self._method_configs["GetAnnotationSpec"].retry, - default_timeout=self._method_configs["GetAnnotationSpec"].timeout, + self.transport.delete_model, + default_retry=self._method_configs["DeleteModel"].retry, + default_timeout=self._method_configs["DeleteModel"].timeout, client_info=self._client_info, ) - request = service_pb2.GetAnnotationSpecRequest(name=name) + request = service_pb2.DeleteModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) @@ -1931,36 +1861,67 @@ def get_annotation_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_annotation_spec"]( + operation = self._inner_api_calls["delete_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def get_table_spec( + def deploy_model( self, name, - field_mask=None, + image_object_detection_model_deployment_metadata=None, + image_classification_model_deployment_metadata=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a table spec. + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing + + ``node_number``) will reset the deployment state without pausing the + model's availability. + + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.get_table_spec(name) + >>> response = client.deploy_model(name) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): The resource name of the table spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + name (str): Required. Resource name of the model to deploy. + image_object_detection_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata]): Model deployment metadata specific to Image Object Detection. 
If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata` + image_classification_model_deployment_metadata (Union[dict, ~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata]): Model deployment metadata specific to Image Classification. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -1971,7 +1932,7 @@ def get_table_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -1981,17 +1942,28 @@ def get_table_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_table_spec" not in self._inner_api_calls: + if "deploy_model" not in self._inner_api_calls: self._inner_api_calls[ - "get_table_spec" + "deploy_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_table_spec, - default_retry=self._method_configs["GetTableSpec"].retry, - default_timeout=self._method_configs["GetTableSpec"].timeout, + self.transport.deploy_model, + default_retry=self._method_configs["DeployModel"].retry, + default_timeout=self._method_configs["DeployModel"].timeout, client_info=self._client_info, ) - request = service_pb2.GetTableSpecRequest(name=name, field_mask=field_mask) + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. + google.api_core.protobuf_helpers.check_oneof( + image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, + image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + ) + + request = service_pb2.DeployModelRequest( + name=name, + image_object_detection_model_deployment_metadata=image_object_detection_model_deployment_metadata, + image_classification_model_deployment_metadata=image_classification_model_deployment_metadata, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2005,56 +1977,52 @@ def get_table_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_table_spec"]( + operation = self._inner_api_calls["deploy_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_table_specs( + def undeploy_model( self, - parent, - field_mask=None, - filter_=None, - page_size=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists table specs in a dataset. - + Undeploys a model. If the model is not deployed this method has no + effect. + + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. + + Returns an empty response in the ``response`` field when it completes. 
+ Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> # Iterate over all results - >>> for element in client.list_table_specs(parent): - ... # process element - ... pass + >>> response = client.undeploy_model(name) >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() >>> - >>> # Alternatively: + >>> response.add_done_callback(callback) >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_table_specs(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - parent (str): The resource name of the dataset to list table specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name of the model to undeploy. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2065,10 +2033,7 @@ def list_table_specs( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.TableSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2078,24 +2043,22 @@ def list_table_specs( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_table_specs" not in self._inner_api_calls: + if "undeploy_model" not in self._inner_api_calls: self._inner_api_calls[ - "list_table_specs" + "undeploy_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_table_specs, - default_retry=self._method_configs["ListTableSpecs"].retry, - default_timeout=self._method_configs["ListTableSpecs"].timeout, + self.transport.undeploy_model, + default_retry=self._method_configs["UndeployModel"].retry, + default_timeout=self._method_configs["UndeployModel"].timeout, client_info=self._client_info, ) - request = service_pb2.ListTableSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size - ) + request = service_pb2.UndeployModelRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2104,51 +2067,60 @@ def list_table_specs( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_table_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="table_specs", - request_token_field="page_token", - response_token_field="next_page_token", + operation = self._inner_api_calls["undeploy_model"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, ) - return iterator - def update_table_spec( + def export_model( self, - table_spec, - update_mask=None, + name, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a table spec. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in + + ``ModelExportOutputConfig``. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `table_spec`: - >>> table_spec = {} + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.update_table_spec(table_spec) + >>> # TODO: Initialize `output_config`: + >>> output_config = {} + >>> + >>> response = client.export_model(name, output_config) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - table_spec (Union[dict, ~google.cloud.automl_v1beta1.types.TableSpec]): The table spec which replaces the resource on the server. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.TableSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + name (str): Required. The resource name of the model to export. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ModelExportOutputConfig]): Required. The desired output location and configuration. 
If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.ModelExportOutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2159,7 +2131,7 @@ def update_table_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.TableSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2169,24 +2141,24 @@ def update_table_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_table_spec" not in self._inner_api_calls: + if "export_model" not in self._inner_api_calls: self._inner_api_calls[ - "update_table_spec" + "export_model" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_table_spec, - default_retry=self._method_configs["UpdateTableSpec"].retry, - default_timeout=self._method_configs["UpdateTableSpec"].timeout, + self.transport.export_model, + default_retry=self._method_configs["ExportModel"].retry, + default_timeout=self._method_configs["ExportModel"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateTableSpecRequest( - table_spec=table_spec, update_mask=update_mask + request = service_pb2.ExportModelRequest( + name=name, output_config=output_config, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("table_spec.name", table_spec.name)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2195,36 +2167,66 @@ def update_table_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_table_spec"]( + operation = self._inner_api_calls["export_model"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def get_column_spec( + def export_evaluated_examples( self, name, - field_mask=None, + output_config, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Gets a column spec. + Exports examples on which the model was evaluated (i.e. which were + in the TEST set of the dataset the model was created from), together + with their ground truth annotations and the annotations created + (predicted) by the model. The examples, ground truth and predictions are + exported in the state they were at the moment the model was evaluated. + + This export is available only for 30 days since the model evaluation is + created. + + Currently only available for Tables. + + Returns an empty response in the ``response`` field when it completes. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> name = client.column_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]', '[COLUMN_SPEC]') + >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.get_column_spec(name) + >>> # TODO: Initialize `output_config`: + >>> output_config = {} + >>> + >>> response = client.export_evaluated_examples(name, output_config) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... 
result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() Args: - name (str): The resource name of the column spec to retrieve. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. + name (str): Required. The resource name of the model whose evaluated examples are to + be exported. + output_config (Union[dict, ~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig]): Required. The desired output location and configuration. If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + message :class:`~google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2235,7 +2237,7 @@ def get_column_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. + A :class:`~google.cloud.automl_v1beta1.types._OperationFuture` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2245,17 +2247,19 @@ def get_column_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "get_column_spec" not in self._inner_api_calls: + if "export_evaluated_examples" not in self._inner_api_calls: self._inner_api_calls[ - "get_column_spec" + "export_evaluated_examples" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.get_column_spec, - default_retry=self._method_configs["GetColumnSpec"].retry, - default_timeout=self._method_configs["GetColumnSpec"].timeout, + self.transport.export_evaluated_examples, + default_retry=self._method_configs["ExportEvaluatedExamples"].retry, + default_timeout=self._method_configs["ExportEvaluatedExamples"].timeout, client_info=self._client_info, ) - request = service_pb2.GetColumnSpecRequest(name=name, field_mask=field_mask) + request = service_pb2.ExportEvaluatedExamplesRequest( + name=name, output_config=output_config, + ) if metadata is None: metadata = [] metadata = list(metadata) @@ -2269,56 +2273,37 @@ def get_column_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["get_column_spec"]( + operation = self._inner_api_calls["export_evaluated_examples"]( request, retry=retry, timeout=timeout, metadata=metadata ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + empty_pb2.Empty, + metadata_type=proto_operations_pb2.OperationMetadata, + ) - def list_column_specs( + def get_model_evaluation( self, - parent, - field_mask=None, - filter_=None, - page_size=None, + name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Lists column specs in a table spec. + Gets a model evaluation. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> parent = client.table_spec_path('[PROJECT]', '[LOCATION]', '[DATASET]', '[TABLE_SPEC]') - >>> - >>> # Iterate over all results - >>> for element in client.list_column_specs(parent): - ... # process element - ... 
pass - >>> - >>> - >>> # Alternatively: + >>> name = client.model_evaluation_path('[PROJECT]', '[LOCATION]', '[MODEL]', '[MODEL_EVALUATION]') >>> - >>> # Iterate over results one page at a time - >>> for page in client.list_column_specs(parent).pages: - ... for element in page: - ... # process element - ... pass + >>> response = client.get_model_evaluation(name) Args: - parent (str): The resource name of the table spec to list column specs from. - field_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): Mask specifying which fields to read. - - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` - filter_ (str): Filter expression, see go/filtering. - page_size (int): The maximum number of resources contained in the - underlying API response. If page streaming is performed per- - resource, this parameter does not affect the return value. If page - streaming is performed per-page, this determines the maximum number - of resources in a page. + name (str): Required. Resource name for the model evaluation. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2329,10 +2314,7 @@ def list_column_specs( that is provided to the method. Returns: - A :class:`~google.api_core.page_iterator.PageIterator` instance. - An iterable of :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instances. - You can also iterate over the pages of the response - using its `pages` property. + A :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2342,24 +2324,22 @@ def list_column_specs( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
- if "list_column_specs" not in self._inner_api_calls: + if "get_model_evaluation" not in self._inner_api_calls: self._inner_api_calls[ - "list_column_specs" + "get_model_evaluation" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.list_column_specs, - default_retry=self._method_configs["ListColumnSpecs"].retry, - default_timeout=self._method_configs["ListColumnSpecs"].timeout, + self.transport.get_model_evaluation, + default_retry=self._method_configs["GetModelEvaluation"].retry, + default_timeout=self._method_configs["GetModelEvaluation"].timeout, client_info=self._client_info, ) - request = service_pb2.ListColumnSpecsRequest( - parent=parent, field_mask=field_mask, filter=filter_, page_size=page_size - ) + request = service_pb2.GetModelEvaluationRequest(name=name,) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("parent", parent)] + routing_header = [("name", name)] except AttributeError: pass else: @@ -2368,51 +2348,63 @@ def list_column_specs( ) metadata.append(routing_metadata) - iterator = google.api_core.page_iterator.GRPCIterator( - client=None, - method=functools.partial( - self._inner_api_calls["list_column_specs"], - retry=retry, - timeout=timeout, - metadata=metadata, - ), - request=request, - items_field="column_specs", - request_token_field="page_token", - response_token_field="next_page_token", + return self._inner_api_calls["get_model_evaluation"]( + request, retry=retry, timeout=timeout, metadata=metadata ) - return iterator - def update_column_spec( + def list_model_evaluations( self, - column_spec, - update_mask=None, + parent, + filter_=None, + page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ - Updates a column spec. + Lists model evaluations. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.AutoMlClient() >>> - >>> # TODO: Initialize `column_spec`: - >>> column_spec = {} + >>> parent = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> - >>> response = client.update_column_spec(column_spec) + >>> # Iterate over all results + >>> for element in client.list_model_evaluations(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_model_evaluations(parent).pages: + ... for element in page: + ... # process element + ... pass Args: - column_spec (Union[dict, ~google.cloud.automl_v1beta1.types.ColumnSpec]): The column spec which replaces the resource on the server. + parent (str): Required. Resource name of the model to list the model evaluations for. + If modelId is set as "-", this will list model evaluations from across all + models of the parent location. + filter_ (str): An expression for filtering the results of the request. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` - update_mask (Union[dict, ~google.cloud.automl_v1beta1.types.FieldMask]): The update mask applies to the resource. + - ``annotation_spec_id`` - for =, != or existence. See example below + for the last. - If a dict is provided, it must be of the same form as the protobuf - message :class:`~google.cloud.automl_v1beta1.types.FieldMask` + Some examples of using the filter are: + + - ``annotation_spec_id!=4`` --> The model evaluation was done for + annotation spec with ID different than 4. 
+ - ``NOT annotation_spec_id:*`` --> The model evaluation was done for + aggregate of all annotation specs. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -2423,7 +2415,10 @@ def update_column_spec( that is provided to the method. Returns: - A :class:`~google.cloud.automl_v1beta1.types.ColumnSpec` instance. + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.automl_v1beta1.types.ModelEvaluation` instances. + You can also iterate over the pages of the response + using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request @@ -2433,24 +2428,24 @@ def update_column_spec( ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. - if "update_column_spec" not in self._inner_api_calls: + if "list_model_evaluations" not in self._inner_api_calls: self._inner_api_calls[ - "update_column_spec" + "list_model_evaluations" ] = google.api_core.gapic_v1.method.wrap_method( - self.transport.update_column_spec, - default_retry=self._method_configs["UpdateColumnSpec"].retry, - default_timeout=self._method_configs["UpdateColumnSpec"].timeout, + self.transport.list_model_evaluations, + default_retry=self._method_configs["ListModelEvaluations"].retry, + default_timeout=self._method_configs["ListModelEvaluations"].timeout, client_info=self._client_info, ) - request = service_pb2.UpdateColumnSpecRequest( - column_spec=column_spec, update_mask=update_mask + request = service_pb2.ListModelEvaluationsRequest( + parent=parent, filter=filter_, page_size=page_size, ) if metadata is None: metadata = [] metadata = list(metadata) try: - routing_header = [("column_spec.name", column_spec.name)] + routing_header = [("parent", parent)] except AttributeError: pass else: @@ -2459,6 +2454,17 @@ def update_column_spec( ) metadata.append(routing_metadata) - return self._inner_api_calls["update_column_spec"]( - request, retry=retry, timeout=timeout, metadata=metadata + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_model_evaluations"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="model_evaluation", + request_token_field="page_token", + response_token_field="next_page_token", ) + return iterator diff --git a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py index d127ce6f..7319dbad 100644 --- a/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py +++ b/google/cloud/automl_v1beta1/gapic/auto_ml_client_config.py @@ -2,140 +2,159 @@ "interfaces": { "google.cloud.automl.v1beta1.AutoMl": { "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], + "retry_policy_1_codes": ["UNAVAILABLE", "DEADLINE_EXCEEDED"], + "no_retry_2_codes": [], + "no_retry_codes": [], }, "retry_params": { - "default": { + "retry_policy_1_params": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - 
"initial_rpc_timeout_millis": 20000, + "initial_rpc_timeout_millis": 5000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 20000, - "total_timeout_millis": 600000, - } + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_2_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 5000, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 5000, + "total_timeout_millis": 5000, + }, }, "methods": { "CreateDataset": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "UpdateDataset": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "GetDataset": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ListDatasets": { "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", + }, + "UpdateDataset": { + "timeout_millis": 5000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "DeleteDataset": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, "ImportData": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, "ExportData": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", - }, - "CreateModel": { - "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "GetModel": { + "GetAnnotationSpec": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "ListModels": { - "timeout_millis": 50000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "GetTableSpec": { + "timeout_millis": 5000, + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "DeleteModel": { + "ListTableSpecs": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "DeployModel": { + "UpdateTableSpec": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "UndeployModel": { + "GetColumnSpec": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + 
"retry_params_name": "retry_policy_1_params", }, - "GetModelEvaluation": { + "ListColumnSpecs": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "ExportModel": { + "UpdateColumnSpec": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "ExportEvaluatedExamples": { + "CreateModel": { + "timeout_millis": 20000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", + }, + "GetModel": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "ListModelEvaluations": { + "ListModels": { "timeout_millis": 50000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "GetAnnotationSpec": { + "DeleteModel": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "GetTableSpec": { + "DeployModel": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "ListTableSpecs": { + "UndeployModel": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "UpdateTableSpec": { + "ExportModel": { "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "GetColumnSpec": { + "ExportEvaluatedExamples": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, - "ListColumnSpecs": { + "GetModelEvaluation": { "timeout_millis": 5000, - "retry_codes_name": "idempotent", - "retry_params_name": "default", + "retry_codes_name": "retry_policy_1_codes", + "retry_params_name": "retry_policy_1_params", }, - "UpdateColumnSpec": { - "timeout_millis": 5000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "ListModelEvaluations": { + "timeout_millis": 50000, + "retry_codes_name": "no_retry_2_codes", + "retry_params_name": "no_retry_2_params", }, }, } diff --git a/google/cloud/automl_v1beta1/gapic/enums.py b/google/cloud/automl_v1beta1/gapic/enums.py index 9f09f9ce..2560c4f9 100644 --- a/google/cloud/automl_v1beta1/gapic/enums.py +++ b/google/cloud/automl_v1beta1/gapic/enums.py @@ -36,8 +36,8 @@ class ClassificationType(enum.IntEnum): class NullValue(enum.IntEnum): """ - ``NullValue`` is a singleton enumeration to represent the null value for - the ``Value`` type union. + ``NullValue`` is a singleton enumeration to represent the null value + for the ``Value`` type union. The JSON representation for ``NullValue`` is JSON ``null``. @@ -61,17 +61,17 @@ class TypeCode(enum.IntEnum): ``date-time`` format, where ``time-offset`` = ``"Z"`` (e.g. 1985-04-12T23:20:50.52Z). 
STRING (int): Encoded as ``string``. - ARRAY (int): Encoded as ``list``, where the list elements are represented according - to + ARRAY (int): Encoded as ``list``, where the list elements are represented + according to ``list_element_type``. - STRUCT (int): Encoded as ``struct``, where field values are represented according to - ``struct_type``. - CATEGORY (int): Values of this type are not further understood by AutoML, e.g. AutoML is - unable to tell the order of values (as it could with FLOAT64), or is - unable to say if one value contains another (as it could with STRING). - Encoded as ``string`` (bytes should be base64-encoded, as described in - RFC 4648, section 4). + STRUCT (int): Encoded as ``struct``, where field values are represented according + to ``struct_type``. + CATEGORY (int): Values of this type are not further understood by AutoML, e.g. + AutoML is unable to tell the order of values (as it could with FLOAT64), + or is unable to say if one value contains another (as it could with + STRING). Encoded as ``string`` (bytes should be base64-encoded, as + described in RFC 4648, section 4). """ TYPE_CODE_UNSPECIFIED = 0 @@ -94,12 +94,12 @@ class TextSegmentType(enum.IntEnum): TOKEN (int): The text segment is a token. e.g. word. PARAGRAPH (int): The text segment is a paragraph. FORM_FIELD (int): The text segment is a form field. - FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be treated as - child of another FORM\_FIELD TextSegment if its span is subspan of - another TextSegment with type FORM\_FIELD. - FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will be - treated as child of another FORM\_FIELD TextSegment if its span is - subspan of another TextSegment with type FORM\_FIELD. + FORM_FIELD_NAME (int): The text segment is the name part of a form field. It will be + treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. + FORM_FIELD_CONTENTS (int): The text segment is the text content part of a form field. It will + be treated as child of another FORM_FIELD TextSegment if its span is + subspan of another TextSegment with type FORM_FIELD. TABLE (int): The text segment is a whole table, including headers, and all rows. TABLE_HEADER (int): The text segment is a table's headers. It will be treated as child of another TABLE TextSegment if its span is subspan of another TextSegment @@ -108,8 +108,8 @@ class TextSegmentType(enum.IntEnum): another TABLE TextSegment if its span is subspan of another TextSegment with type TABLE. TABLE_CELL (int): The text segment is a cell in table. It will be treated as child of - another TABLE\_ROW TextSegment if its span is subspan of another - TextSegment with type TABLE\_ROW. + another TABLE_ROW TextSegment if its span is subspan of another + TextSegment with type TABLE_ROW. 
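Returning briefly to the auto_ml_client_config.py hunks above: the millisecond fields of retry_policy_1_params correspond, roughly, to a google.api_core.retry.Retry built by hand as below. The generated client derives this itself through gapic_v1.config.parse_method_configs, so the hand-built object is only an illustrative equivalent (values converted to seconds):

    >>> from google.api_core import exceptions, retry
    >>>
    >>> retry_policy_1 = retry.Retry(
    ...     predicate=retry.if_exception_type(
    ...         exceptions.ServiceUnavailable,  # UNAVAILABLE
    ...         exceptions.DeadlineExceeded,    # DEADLINE_EXCEEDED
    ...     ),
    ...     initial=0.1,      # initial_retry_delay_millis: 100
    ...     multiplier=1.3,   # retry_delay_multiplier: 1.3
    ...     maximum=60.0,     # max_retry_delay_millis: 60000
    ...     deadline=5.0,     # total_timeout_millis: 5000
    ... )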
""" TEXT_SEGMENT_TYPE_UNSPECIFIED = 0 diff --git a/google/cloud/automl_v1beta1/gapic/prediction_service_client.py b/google/cloud/automl_v1beta1/gapic/prediction_service_client.py index 57cedc90..2bcb31ca 100644 --- a/google/cloud/automl_v1beta1/gapic/prediction_service_client.py +++ b/google/cloud/automl_v1beta1/gapic/prediction_service_client.py @@ -36,26 +36,15 @@ from google.cloud.automl_v1beta1.gapic.transports import ( prediction_service_grpc_transport, ) -from google.cloud.automl_v1beta1.proto import annotation_spec_pb2 -from google.cloud.automl_v1beta1.proto import column_spec_pb2 from google.cloud.automl_v1beta1.proto import data_items_pb2 -from google.cloud.automl_v1beta1.proto import dataset_pb2 -from google.cloud.automl_v1beta1.proto import image_pb2 from google.cloud.automl_v1beta1.proto import io_pb2 -from google.cloud.automl_v1beta1.proto import model_evaluation_pb2 -from google.cloud.automl_v1beta1.proto import model_pb2 from google.cloud.automl_v1beta1.proto import operations_pb2 as proto_operations_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2 from google.cloud.automl_v1beta1.proto import prediction_service_pb2_grpc -from google.cloud.automl_v1beta1.proto import service_pb2 -from google.cloud.automl_v1beta1.proto import service_pb2_grpc -from google.cloud.automl_v1beta1.proto import table_spec_pb2 from google.longrunning import operations_pb2 as longrunning_operations_pb2 -from google.protobuf import empty_pb2 -from google.protobuf import field_mask_pb2 -_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl").version +_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-automl",).version class PredictionServiceClient(object): @@ -63,7 +52,7 @@ class PredictionServiceClient(object): AutoML Prediction API. On any input that is documented to expect a string parameter in - snake\_case or kebab-case, either of those cases is accepted. + snake_case or kebab-case, either of those cases is accepted. """ SERVICE_ADDRESS = "automl.googleapis.com:443" @@ -190,12 +179,12 @@ def __init__( self.transport = transport else: self.transport = prediction_service_grpc_transport.PredictionServiceGrpcTransport( - address=api_endpoint, channel=channel, credentials=credentials + address=api_endpoint, channel=channel, credentials=credentials, ) if client_info is None: client_info = google.api_core.gapic_v1.client_info.ClientInfo( - gapic_version=_GAPIC_LIBRARY_VERSION + gapic_version=_GAPIC_LIBRARY_VERSION, ) else: client_info.gapic_version = _GAPIC_LIBRARY_VERSION @@ -206,7 +195,7 @@ def __init__( # (Ordinarily, these are the defaults specified in the `*_config.py` # file next to this one.) self._method_configs = google.api_core.gapic_v1.config.parse_method_configs( - client_config["interfaces"][self._INTERFACE_NAME] + client_config["interfaces"][self._INTERFACE_NAME], ) # Save a dictionary of cached API call functions. @@ -231,9 +220,9 @@ def predict( expected request payloads: - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Text Classification - TextSnippet, content up to 60,000 characters, UTF-8 encoded. - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 @@ -261,14 +250,14 @@ def predict( >>> response = client.predict(name, payload) Args: - name (str): Name of the model requested to serve the prediction. 
+ name (str): Required. Name of the model requested to serve the prediction. payload (Union[dict, ~google.cloud.automl_v1beta1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.ExamplePayload` - params (dict[str -> str]): Additional domain-specific parameters, any string must be up to 25000 - characters long. + params (dict[str -> str]): Additional domain-specific parameters, any string must be up to + 25000 characters long. - For Image Classification: @@ -283,13 +272,9 @@ def predict( this number of bounding boxes will be returned in the response. Default is 100, the requested value may be limited by server. - - For Tables: ``feature_importance`` - (boolean) Whether - - [feature\_importance][[google.cloud.automl.v1beta1.TablesModelColumnInfo.feature\_importance] - should be populated in the returned - - [TablesAnnotation(-s)][[google.cloud.automl.v1beta1.TablesAnnotation]. - The default is false. + - For Tables: feature_importance - (boolean) Whether feature importance + should be populated in the returned TablesAnnotation. The default is + false. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -321,7 +306,7 @@ def predict( ) request = prediction_service_pb2.PredictRequest( - name=name, payload=payload, params=params + name=name, payload=payload, params=params, ) if metadata is None: metadata = [] @@ -345,7 +330,7 @@ def batch_predict( name, input_config, output_config, - params=None, + params, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -377,7 +362,10 @@ def batch_predict( >>> # TODO: Initialize `output_config`: >>> output_config = {} >>> - >>> response = client.batch_predict(name, input_config, output_config) + >>> # TODO: Initialize `params`: + >>> params = {} + >>> + >>> response = client.batch_predict(name, input_config, output_config, params) >>> >>> def callback(operation_future): ... # Handle result. @@ -389,7 +377,7 @@ def batch_predict( >>> metadata = response.metadata() Args: - name (str): Name of the model requested to serve the batch prediction. + name (str): Required. Name of the model requested to serve the batch prediction. input_config (Union[dict, ~google.cloud.automl_v1beta1.types.BatchPredictInputConfig]): Required. The input configuration for batch prediction. If a dict is provided, it must be of the same form as the protobuf @@ -399,8 +387,8 @@ def batch_predict( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.BatchPredictOutputConfig` - params (dict[str -> str]): Additional domain-specific parameters for the predictions, any string - must be up to 25000 characters long. + params (dict[str -> str]): Required. Additional domain-specific parameters for the predictions, + any string must be up to 25000 characters long. - For Text Classification: @@ -423,22 +411,24 @@ def batch_predict( bounding boxes will be produced per image. Default is 100, the requested value may be limited by server. - - For Video Classification : ``score_threshold`` - (float) A value from - 0.0 to 1.0. When the model makes predictions for a video, it will - only produce results that have at least this confidence score. 
The - default is 0.5. ``segment_classification`` - (boolean) Set to true to - request segment-level classification. AutoML Video Intelligence - returns labels and their confidence scores for the entire segment of - the video that user specified in the request configuration. The - default is "true". ``shot_classification`` - (boolean) Set to true to - request shot-level classification. AutoML Video Intelligence - determines the boundaries for each camera shot in the entire segment - of the video that user specified in the request configuration. AutoML - Video Intelligence then returns labels and their confidence scores - for each detected shot, along with the start and end time of the - shot. WARNING: Model evaluation is not done for this classification - type, the quality of it depends on training data, but there are no - metrics provided to describe that quality. The default is "false". + - For Video Classification : + + ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model + makes predictions for a video, it will only produce results that have + at least this confidence score. The default is 0.5. + ``segment_classification`` - (boolean) Set to true to request + segment-level classification. AutoML Video Intelligence returns + labels and their confidence scores for the entire segment of the + video that user specified in the request configuration. The default + is "true". ``shot_classification`` - (boolean) Set to true to request + shot-level classification. AutoML Video Intelligence determines the + boundaries for each camera shot in the entire segment of the video + that user specified in the request configuration. AutoML Video + Intelligence then returns labels and their confidence scores for each + detected shot, along with the start and end time of the shot. + WARNING: Model evaluation is not done for this classification type, + the quality of it depends on training data, but there are no metrics + provided to describe that quality. The default is "false". ``1s_interval_classification`` - (boolean) Set to true to request classification for a video at one-second intervals. AutoML Video Intelligence returns labels and their confidence scores for each @@ -448,15 +438,22 @@ def batch_predict( there are no metrics provided to describe that quality. The default is "false". - - For Video Object Tracking: ``score_threshold`` - (float) When Model - detects objects on video frames, it will only produce bounding boxes - which have at least this confidence score. Value in 0 to 1 range, - default is 0.5. ``max_bounding_box_count`` - (int64) No more than - this number of bounding boxes will be returned per frame. Default is - 100, the requested value may be limited by server. - ``min_bounding_box_size`` - (float) Only bounding boxes with shortest - edge at least that long as a relative value of video frame size will - be returned. Value in 0 to 1 range. Default is 0. + - For Tables: + + feature_importance - (boolean) Whether feature importance should be + populated in the returned TablesAnnotations. The default is false. + + - For Video Object Tracking: + + ``score_threshold`` - (float) When Model detects objects on video + frames, it will only produce bounding boxes which have at least this + confidence score. Value in 0 to 1 range, default is 0.5. + ``max_bounding_box_count`` - (int64) No more than this number of + bounding boxes will be returned per frame. Default is 100, the + requested value may be limited by server. 
``min_bounding_box_size`` - + (float) Only bounding boxes with shortest edge at least that long as + a relative value of video frame size will be returned. Value in 0 to + 1 range. Default is 0. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. diff --git a/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py b/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py index d93ca92f..76c85878 100644 --- a/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py +++ b/google/cloud/automl_v1beta1/gapic/prediction_service_client_config.py @@ -1,31 +1,37 @@ config = { "interfaces": { "google.cloud.automl.v1beta1.PredictionService": { - "retry_codes": { - "idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"], - "non_idempotent": [], - }, + "retry_codes": {"no_retry_codes": [], "no_retry_1_codes": []}, "retry_params": { - "default": { - "initial_retry_delay_millis": 100, - "retry_delay_multiplier": 1.3, - "max_retry_delay_millis": 60000, + "no_retry_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, + "initial_rpc_timeout_millis": 0, + "rpc_timeout_multiplier": 1.0, + "max_rpc_timeout_millis": 0, + "total_timeout_millis": 0, + }, + "no_retry_1_params": { + "initial_retry_delay_millis": 0, + "retry_delay_multiplier": 0.0, + "max_retry_delay_millis": 0, "initial_rpc_timeout_millis": 60000, "rpc_timeout_multiplier": 1.0, "max_rpc_timeout_millis": 60000, - "total_timeout_millis": 600000, - } + "total_timeout_millis": 60000, + }, }, "methods": { "Predict": { "timeout_millis": 60000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, "BatchPredict": { "timeout_millis": 20000, - "retry_codes_name": "non_idempotent", - "retry_params_name": "default", + "retry_codes_name": "no_retry_1_codes", + "retry_params_name": "no_retry_1_params", }, }, } diff --git a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py index 106c3c6c..8dcd0ec1 100644 --- a/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py +++ b/google/cloud/automl_v1beta1/gapic/transports/auto_ml_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -72,7 +72,9 @@ def __init__( # gRPC uses objects called "stubs" that are bound to the # channel and provide a basic method for each RPC. - self._stubs = {"auto_ml_stub": service_pb2_grpc.AutoMlStub(channel)} + self._stubs = { + "auto_ml_stub": service_pb2_grpc.AutoMlStub(channel), + } # Because this API includes a method that returns a # long-running operation (proto: google.longrunning.Operation), @@ -127,50 +129,50 @@ def create_dataset(self): return self._stubs["auto_ml_stub"].CreateDataset @property - def update_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. + def get_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. - Updates a dataset. + Gets a dataset. 
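As an aside on the PredictionServiceClient hunks above, a hypothetical online-prediction call using the documented params map (values are strings, per the generated signature); the image path and threshold are placeholders:

    >>> from google.cloud import automl_v1beta1
    >>>
    >>> client = automl_v1beta1.PredictionServiceClient()
    >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
    >>>
    >>> with open('image.jpg', 'rb') as f:  # placeholder file
    ...     payload = {'image': {'image_bytes': f.read()}}
    >>>
    >>> response = client.predict(name, payload, params={'score_threshold': '0.8'})
    >>> for annotation in response.payload:
    ...     print(annotation.display_name)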
Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateDataset + return self._stubs["auto_ml_stub"].GetDataset @property - def get_dataset(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_dataset`. + def list_datasets(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. - Gets a dataset. + Lists datasets in a project. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetDataset + return self._stubs["auto_ml_stub"].ListDatasets @property - def list_datasets(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_datasets`. + def update_dataset(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_dataset`. - Lists datasets in a project. + Updates a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListDatasets + return self._stubs["auto_ml_stub"].UpdateDataset @property def delete_dataset(self): """Return the gRPC stub for :meth:`AutoMlClient.delete_dataset`. - Deletes a dataset and all of its contents. Returns empty response in the - ``response`` field when it completes, and ``delete_details`` in the + Deletes a dataset and all of its contents. Returns empty response in + the ``response`` field when it completes, and ``delete_details`` in the ``metadata`` field. Returns: @@ -184,8 +186,8 @@ def delete_dataset(self): def import_data(self): """Return the gRPC stub for :meth:`AutoMlClient.import_data`. - Imports data into a dataset. For Tables this method can only be called - on an empty Dataset. + Imports data into a dataset. For Tables this method can only be + called on an empty Dataset. For Tables: @@ -204,8 +206,8 @@ def import_data(self): def export_data(self): """Return the gRPC stub for :meth:`AutoMlClient.export_data`. - Exports dataset's data to the provided output location. Returns an empty - response in the ``response`` field when it completes. + Exports dataset's data to the provided output location. Returns an + empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate @@ -215,260 +217,261 @@ def export_data(self): return self._stubs["auto_ml_stub"].ExportData @property - def create_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.create_model`. + def get_annotation_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_annotation_spec`. - Creates a model. Returns a Model in the ``response`` field when it - completes. When you create a model, several model evaluations are - created for it: a global evaluation, and one evaluation for each - annotation spec. + Gets an annotation spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].CreateModel + return self._stubs["auto_ml_stub"].GetAnnotationSpec @property - def get_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_model`. + def get_table_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_table_spec`. - Gets a model. + Gets a table spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
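The transport properties above expose the raw gRPC stub callables; retry and timeout behaviour is layered on in the client via wrap_method and can still be overridden per call. A small sketch of such an override (the Retry object here is illustrative, not one of the named policies from the config):

    >>> from google.api_core import exceptions, retry
    >>> from google.cloud import automl_v1beta1
    >>>
    >>> client = automl_v1beta1.AutoMlClient()
    >>> dataset = client.get_dataset(
    ...     client.dataset_path('[PROJECT]', '[LOCATION]', '[DATASET]'),
    ...     retry=retry.Retry(predicate=retry.if_exception_type(exceptions.ServiceUnavailable)),
    ...     timeout=30.0,
    ... )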
""" - return self._stubs["auto_ml_stub"].GetModel + return self._stubs["auto_ml_stub"].GetTableSpec @property - def list_models(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_models`. + def list_table_specs(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_table_specs`. - Lists models. + Lists table specs in a dataset. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListModels + return self._stubs["auto_ml_stub"].ListTableSpecs @property - def delete_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. + def update_table_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_table_spec`. - Deletes a model. Returns ``google.protobuf.Empty`` in the ``response`` - field when it completes, and ``delete_details`` in the ``metadata`` - field. + Updates a table spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeleteModel + return self._stubs["auto_ml_stub"].UpdateTableSpec @property - def deploy_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - - Deploys a model. If a model is already deployed, deploying it with the - same parameters has no effect. Deploying with different parametrs (as - e.g. changing - - ``node_number``) will reset the deployment state without pausing the - model's availability. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. + def get_column_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_column_spec`. - Returns an empty response in the ``response`` field when it completes. + Gets a column spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].DeployModel + return self._stubs["auto_ml_stub"].GetColumnSpec @property - def undeploy_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.undeploy_model`. - - Undeploys a model. If the model is not deployed this method has no - effect. - - Only applicable for Text Classification, Image Object Detection and - Tables; all other domains manage deployment automatically. + def list_column_specs(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_column_specs`. - Returns an empty response in the ``response`` field when it completes. + Lists column specs in a table spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UndeployModel + return self._stubs["auto_ml_stub"].ListColumnSpecs @property - def get_model_evaluation(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`. + def update_column_spec(self): + """Return the gRPC stub for :meth:`AutoMlClient.update_column_spec`. - Gets a model evaluation. + Updates a column spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetModelEvaluation + return self._stubs["auto_ml_stub"].UpdateColumnSpec @property - def export_model(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_model`. 
- - Exports a trained, "export-able", model to a user specified Google Cloud - Storage location. A model is considered export-able if and only if it - has an export format defined for it in - - ``ModelExportOutputConfig``. + def create_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.create_model`. - Returns an empty response in the ``response`` field when it completes. + Creates a model. Returns a Model in the ``response`` field when it + completes. When you create a model, several model evaluations are + created for it: a global evaluation, and one evaluation for each + annotation spec. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportModel + return self._stubs["auto_ml_stub"].CreateModel @property - def export_evaluated_examples(self): - """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`. - - Exports examples on which the model was evaluated (i.e. which were in - the TEST set of the dataset the model was created from), together with - their ground truth annotations and the annotations created (predicted) - by the model. The examples, ground truth and predictions are exported in - the state they were at the moment the model was evaluated. - - This export is available only for 30 days since the model evaluation is - created. - - Currently only available for Tables. + def get_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_model`. - Returns an empty response in the ``response`` field when it completes. + Gets a model. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ExportEvaluatedExamples + return self._stubs["auto_ml_stub"].GetModel @property - def list_model_evaluations(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. + def list_models(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_models`. - Lists model evaluations. + Lists models. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListModelEvaluations + return self._stubs["auto_ml_stub"].ListModels @property - def get_annotation_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_annotation_spec`. + def delete_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.delete_model`. - Gets an annotation spec. + Deletes a model. Returns ``google.protobuf.Empty`` in the + ``response`` field when it completes, and ``delete_details`` in the + ``metadata`` field. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetAnnotationSpec + return self._stubs["auto_ml_stub"].DeleteModel @property - def get_table_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_table_spec`. + def deploy_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.deploy_model`. - Gets a table spec. + Deploys a model. If a model is already deployed, deploying it with + the same parameters has no effect. Deploying with different parametrs + (as e.g. changing + + ``node_number``) will reset the deployment state without pausing the + model's availability. 
+ + Only applicable for Text Classification, Image Object Detection , + Tables, and Image Segmentation; all other domains manage deployment + automatically. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetTableSpec + return self._stubs["auto_ml_stub"].DeployModel @property - def list_table_specs(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_table_specs`. + def undeploy_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.undeploy_model`. - Lists table specs in a dataset. + Undeploys a model. If the model is not deployed this method has no + effect. + + Only applicable for Text Classification, Image Object Detection and + Tables; all other domains manage deployment automatically. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].ListTableSpecs + return self._stubs["auto_ml_stub"].UndeployModel @property - def update_table_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_table_spec`. + def export_model(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_model`. - Updates a table spec. + Exports a trained, "export-able", model to a user specified Google + Cloud Storage location. A model is considered export-able if and only if + it has an export format defined for it in + + ``ModelExportOutputConfig``. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateTableSpec + return self._stubs["auto_ml_stub"].ExportModel @property - def get_column_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.get_column_spec`. + def export_evaluated_examples(self): + """Return the gRPC stub for :meth:`AutoMlClient.export_evaluated_examples`. - Gets a column spec. + Exports examples on which the model was evaluated (i.e. which were + in the TEST set of the dataset the model was created from), together + with their ground truth annotations and the annotations created + (predicted) by the model. The examples, ground truth and predictions are + exported in the state they were at the moment the model was evaluated. + + This export is available only for 30 days since the model evaluation is + created. + + Currently only available for Tables. + + Returns an empty response in the ``response`` field when it completes. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].GetColumnSpec + return self._stubs["auto_ml_stub"].ExportEvaluatedExamples @property - def list_column_specs(self): - """Return the gRPC stub for :meth:`AutoMlClient.list_column_specs`. + def get_model_evaluation(self): + """Return the gRPC stub for :meth:`AutoMlClient.get_model_evaluation`. - Lists column specs in a table spec. + Gets a model evaluation. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. 
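A hypothetical sketch of the export_model flow described above; the bucket URI is a placeholder and valid model_format values depend on the model type, so treat the output_config as illustrative only:

    >>> from google.cloud import automl_v1beta1
    >>>
    >>> client = automl_v1beta1.AutoMlClient()
    >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
    >>> output_config = {
    ...     'model_format': 'tf_saved_model',  # illustrative; varies by model type
    ...     'gcs_destination': {'output_uri_prefix': 'gs://my-bucket/export/'},  # placeholder bucket
    ... }
    >>> client.export_model(name, output_config).result()  # waits for the empty response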
""" - return self._stubs["auto_ml_stub"].ListColumnSpecs + return self._stubs["auto_ml_stub"].GetModelEvaluation @property - def update_column_spec(self): - """Return the gRPC stub for :meth:`AutoMlClient.update_column_spec`. + def list_model_evaluations(self): + """Return the gRPC stub for :meth:`AutoMlClient.list_model_evaluations`. - Updates a column spec. + Lists model evaluations. Returns: Callable: A callable which accepts the appropriate deserialized request object and returns a deserialized response object. """ - return self._stubs["auto_ml_stub"].UpdateColumnSpec + return self._stubs["auto_ml_stub"].ListModelEvaluations diff --git a/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py b/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py index 69ebca84..6f2b37b1 100644 --- a/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py +++ b/google/cloud/automl_v1beta1/gapic/transports/prediction_service_grpc_transport.py @@ -54,7 +54,7 @@ def __init__( # exception (channels come with credentials baked in already). if channel is not None and credentials is not None: raise ValueError( - "The `channel` and `credentials` arguments are mutually " "exclusive." + "The `channel` and `credentials` arguments are mutually " "exclusive.", ) # Create the channel. @@ -75,7 +75,7 @@ def __init__( self._stubs = { "prediction_service_stub": prediction_service_pb2_grpc.PredictionServiceStub( channel - ) + ), } # Because this API includes a method that returns a @@ -126,9 +126,9 @@ def predict(self): expected request payloads: - Image Classification - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Image Object Detection - Image in .JPEG, .GIF or .PNG format, - image\_bytes up to 30MB. + image_bytes up to 30MB. - Text Classification - TextSnippet, content up to 60,000 characters, UTF-8 encoded. - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload.proto b/google/cloud/automl_v1beta1/proto/annotation_payload.proto index 7cc2860f..f62bb269 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_payload.proto +++ b/google/cloud/automl_v1beta1/proto/annotation_payload.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py index f8036318..bf06fb77 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_payload_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/annotation_payload.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -41,12 +38,9 @@ name="google/cloud/automl_v1beta1/proto/annotation_payload.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x05\n\x11\x41nnotationPayload\x12I\n\x0btranslation\x18\x02 \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TranslationAnnotationH\x00\x12O\n\x0e\x63lassification\x18\x03 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotationH\x00\x12]\n\x16image_object_detection\x18\x04 \x01(\x0b\x32;.google.cloud.automl.v1beta1.ImageObjectDetectionAnnotationH\x00\x12Z\n\x14video_classification\x18\t \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoClassificationAnnotationH\x00\x12[\n\x15video_object_tracking\x18\x08 \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoObjectTrackingAnnotationH\x00\x12P\n\x0ftext_extraction\x18\x06 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TextExtractionAnnotationH\x00\x12N\n\x0etext_sentiment\x18\x07 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.TextSentimentAnnotationH\x00\x12?\n\x06tables\x18\n \x01(\x0b\x32-.google.cloud.automl.v1beta1.TablesAnnotationH\x00\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x05 \x01(\tB\x08\n\x06\x64\x65tailB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x19google/protobuf/any.proto\x1a\x1cgoogle/api/annotations.proto"\xe6\x05\n\x11\x41nnotationPayload\x12I\n\x0btranslation\x18\x02 \x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TranslationAnnotationH\x00\x12O\n\x0e\x63lassification\x18\x03 
\x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotationH\x00\x12]\n\x16image_object_detection\x18\x04 \x01(\x0b\x32;.google.cloud.automl.v1beta1.ImageObjectDetectionAnnotationH\x00\x12Z\n\x14video_classification\x18\t \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoClassificationAnnotationH\x00\x12[\n\x15video_object_tracking\x18\x08 \x01(\x0b\x32:.google.cloud.automl.v1beta1.VideoObjectTrackingAnnotationH\x00\x12P\n\x0ftext_extraction\x18\x06 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TextExtractionAnnotationH\x00\x12N\n\x0etext_sentiment\x18\x07 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.TextSentimentAnnotationH\x00\x12?\n\x06tables\x18\n \x01(\x0b\x32-.google.cloud.automl.v1beta1.TablesAnnotationH\x00\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x05 \x01(\tB\x08\n\x06\x64\x65tailB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR, @@ -66,6 +60,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translation", @@ -84,6 +79,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="classification", @@ -102,6 +98,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection", @@ -120,6 +117,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_classification", @@ -138,6 +136,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_object_tracking", @@ -156,6 +155,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction", @@ -174,6 +174,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment", @@ -192,6 +193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables", @@ -210,6 +212,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -220,7 +223,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -228,6 +231,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -238,7 +242,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -246,6 +250,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -261,8 +266,9 @@ full_name="google.cloud.automl.v1beta1.AnnotationPayload.detail", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=470, serialized_end=1212, @@ -362,12 +368,10 @@ AnnotationPayload = _reflection.GeneratedProtocolMessageType( "AnnotationPayload", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATIONPAYLOAD, - __module__="google.cloud.automl_v1beta1.proto.annotation_payload_pb2", - __doc__="""Contains annotation information that is relevant to - AutoML. - + { + "DESCRIPTOR": _ANNOTATIONPAYLOAD, + "__module__": "google.cloud.automl_v1beta1.proto.annotation_payload_pb2", + "__doc__": """Contains annotation information that is relevant to AutoML. Attributes: detail: @@ -396,15 +400,15 @@ an ancestor dataset, or the dataset that was used to train the model in use. display_name: - Output only. The value of [display\_name][google.cloud.automl. - v1beta1.AnnotationSpec.display\_name] when the model was + Output only. The value of [display_name][google.cloud.automl.v + 1beta1.AnnotationSpec.display_name] when the model was trained. Because this field returns a value at model training time, for different models trained using the same dataset, the returned value could be different as model owner could update the ``display_name`` between any two model training. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationPayload) - ), + }, ) _sym_db.RegisterMessage(AnnotationPayload) diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec.proto b/google/cloud/automl_v1beta1/proto/annotation_spec.proto index 483792b6..d9df07ee 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_spec.proto +++ b/google/cloud/automl_v1beta1/proto/annotation_spec.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; @@ -27,16 +27,19 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // A definition of an annotation spec. message AnnotationSpec { + option (google.api.resource) = { + type: "automl.googleapis.com/AnnotationSpec" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}" + }; + // Output only. Resource name of the annotation spec. // Form: // // 'projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/annotationSpecs/{annotation_spec_id}' string name = 1; - // Required. - // The name of the annotation spec to show in the interface. The name can be + // Required. The name of the annotation spec to show in the interface. The name can be // up to 32 characters long and must match the regexp `[a-zA-Z0-9_]+`. - // (_), and ASCII digits 0-9. string display_name = 2; // Output only. 
The number of examples in the parent dataset diff --git a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py index 80ae0a8a..c259a290 100644 --- a/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/annotation_spec_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/annotation_spec.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 @@ -22,13 +20,13 @@ name="google/cloud/automl_v1beta1/proto/annotation_spec.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n7google/cloud/automl_v1beta1/proto/annotation_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"K\n\x0e\x41nnotationSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x15\n\rexample_count\x18\t \x01(\x05\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/annotation_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto"\xd6\x01\n\x0e\x41nnotationSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x15\n\rexample_count\x18\t \x01(\x05:\x88\x01\xea\x41\x84\x01\n$automl.googleapis.com/AnnotationSpec\x12\\projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + ], ) @@ -38,6 +36,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -48,7 +47,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -56,6 +55,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -66,7 +66,7 @@ 
cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -74,6 +74,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="example_count", @@ -92,18 +93,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + serialized_options=b"\352A\204\001\n$automl.googleapis.com/AnnotationSpec\022\\projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=118, - serialized_end=193, + serialized_start=146, + serialized_end=360, ) DESCRIPTOR.message_types_by_name["AnnotationSpec"] = _ANNOTATIONSPEC @@ -112,31 +114,30 @@ AnnotationSpec = _reflection.GeneratedProtocolMessageType( "AnnotationSpec", (_message.Message,), - dict( - DESCRIPTOR=_ANNOTATIONSPEC, - __module__="google.cloud.automl_v1beta1.proto.annotation_spec_pb2", - __doc__="""A definition of an annotation spec. - + { + "DESCRIPTOR": _ANNOTATIONSPEC, + "__module__": "google.cloud.automl_v1beta1.proto.annotation_spec_pb2", + "__doc__": """A definition of an annotation spec. Attributes: name: - Output only. Resource name of the annotation spec. Form: 'pro - jects/{project\_id}/locations/{location\_id}/datasets/{dataset - \_id}/annotationSpecs/{annotation\_spec\_id}' + Output only. Resource name of the annotation spec. Form: ‘pro + jects/{project_id}/locations/{location_id}/datasets/{dataset_i + d}/annotationSpecs/{annotation_spec_id}’ display_name: Required. The name of the annotation spec to show in the interface. The name can be up to 32 characters long and must - match the regexp ``[a-zA-Z0-9_]+``. (\_), and ASCII digits - 0-9. + match the regexp ``[a-zA-Z0-9_]+``. example_count: Output only. The number of examples in the parent dataset labeled by the annotation spec. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.AnnotationSpec) - ), + }, ) _sym_db.RegisterMessage(AnnotationSpec) DESCRIPTOR._options = None +_ANNOTATIONSPEC._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/classification.proto b/google/cloud/automl_v1beta1/proto/classification.proto index c8475542..0594d01e 100644 --- a/google/cloud/automl_v1beta1/proto/classification.proto +++ b/google/cloud/automl_v1beta1/proto/classification.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/temporal.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_outer_classname = "ClassificationProto"; @@ -126,10 +125,7 @@ message ClassificationEvaluationMetrics { // for each example. float false_positive_rate_at1 = 9; - // Output only. 
The harmonic mean of - // [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] - // and - // [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. + // Output only. The harmonic mean of [recall_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.recall_at1] and [precision_at1][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.precision_at1]. float f1_score_at1 = 7; // Output only. The number of model created labels that match a ground truth @@ -156,9 +152,7 @@ message ClassificationEvaluationMetrics { // Output only. Value of the specific cell in the confusion matrix. // The number of values each row has (i.e. the length of the row) is equal // to the length of the `annotation_spec_id` field or, if that one is not - // populated, length of the - // [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] - // field. + // populated, length of the [display_name][google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.display_name] field. repeated int32 example_count = 1; } diff --git a/google/cloud/automl_v1beta1/proto/classification_pb2.py b/google/cloud/automl_v1beta1/proto/classification_pb2.py index 68651a84..9b38e2db 100644 --- a/google/cloud/automl_v1beta1/proto/classification_pb2.py +++ b/google/cloud/automl_v1beta1/proto/classification_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/classification.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -16,25 +13,22 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, ) +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/classification.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\023ClassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n6google/cloud/automl_v1beta1/proto/classification.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto")\n\x18\x43lassificationAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02"\xc7\x01\n\x1dVideoClassificationAnnotation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12X\n\x19\x63lassification_annotation\x18\x02 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotation\x12>\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 
\x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 \x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\023ClassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/classification.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/api/annotations.proto")\n\x18\x43lassificationAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02"\xc7\x01\n\x1dVideoClassificationAnnotation\x12\x0c\n\x04type\x18\x01 \x01(\t\x12X\n\x19\x63lassification_annotation\x18\x02 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.ClassificationAnnotation\x12>\n\x0ctime_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TimeSegment"\xa9\x07\n\x1f\x43lassificationEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12\x17\n\x0b\x62\x61se_au_prc\x18\x02 \x01(\x02\x42\x02\x18\x01\x12\x0e\n\x06\x61u_roc\x18\x06 \x01(\x02\x12\x10\n\x08log_loss\x18\x07 \x01(\x02\x12u\n\x18\x63onfidence_metrics_entry\x18\x03 \x03(\x0b\x32S.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry\x12\x66\n\x10\x63onfusion_matrix\x18\x04 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x05 \x03(\t\x1a\xfc\x02\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x1a\n\x12position_threshold\x18\x0e \x01(\x05\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x1b\n\x13\x66\x61lse_positive_rate\x18\x08 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02\x12\x12\n\nrecall_at1\x18\x05 \x01(\x02\x12\x15\n\rprecision_at1\x18\x06 
\x01(\x02\x12\x1f\n\x17\x66\x61lse_positive_rate_at1\x18\t \x01(\x02\x12\x14\n\x0c\x66\x31_score_at1\x18\x07 \x01(\x02\x12\x1b\n\x13true_positive_count\x18\n \x01(\x03\x12\x1c\n\x14\x66\x61lse_positive_count\x18\x0b \x01(\x03\x12\x1c\n\x14\x66\x61lse_negative_count\x18\x0c \x01(\x03\x12\x1b\n\x13true_negative_count\x18\r \x01(\x03\x1a\xc0\x01\n\x0f\x43onfusionMatrix\x12\x1a\n\x12\x61nnotation_spec_id\x18\x01 \x03(\t\x12\x14\n\x0c\x64isplay_name\x18\x03 \x03(\t\x12]\n\x03row\x18\x02 \x03(\x0b\x32P.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row\x1a\x1c\n\x03Row\x12\x15\n\rexample_count\x18\x01 \x03(\x05*Y\n\x12\x43lassificationType\x12#\n\x1f\x43LASSIFICATION_TYPE_UNSPECIFIED\x10\x00\x12\x0e\n\nMULTICLASS\x10\x01\x12\x0e\n\nMULTILABEL\x10\x02\x42\xb8\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x13\x43lassificationProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -43,6 +37,7 @@ full_name="google.cloud.automl.v1beta1.ClassificationType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="CLASSIFICATION_TYPE_UNSPECIFIED", @@ -50,12 +45,23 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MULTICLASS", index=1, number=1, serialized_options=None, type=None + name="MULTICLASS", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="MULTILABEL", index=2, number=2, serialized_options=None, type=None + name="MULTILABEL", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -77,6 +83,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="score", @@ -95,7 +102,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -116,6 +124,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="type", @@ -126,7 +135,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -134,6 +143,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="classification_annotation", @@ -152,6 +162,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_segment", @@ -170,6 +181,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -191,6 +203,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="confidence_threshold", @@ -209,6 +222,7 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="position_threshold", @@ -227,6 +241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -245,6 +260,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision", @@ -263,6 +279,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_positive_rate", @@ -281,6 +298,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -299,6 +317,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall_at1", @@ -317,6 +336,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision_at1", @@ -335,6 +355,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_positive_rate_at1", @@ -353,6 +374,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score_at1", @@ -371,6 +393,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="true_positive_count", @@ -389,6 +412,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_positive_count", @@ -407,6 +431,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="false_negative_count", @@ -425,6 +450,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="true_negative_count", @@ -443,6 +469,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -463,6 +490,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="example_count", @@ -481,7 +509,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -501,6 +530,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -519,6 +549,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -537,6 +568,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row", @@ -555,10 +587,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW], + nested_types=[_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW,], enum_types=[], serialized_options=None, is_extendable=False, @@ -575,6 +608,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="au_prc", @@ -593,6 +627,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="base_au_prc", @@ -609,8 +644,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\030\001"), + serialized_options=b"\030\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="au_roc", @@ -629,6 +665,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="log_loss", @@ -647,6 +684,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence_metrics_entry", @@ -665,6 +703,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confusion_matrix", @@ -683,6 +722,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -701,6 +741,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -757,11 +798,10 @@ ClassificationAnnotation = _reflection.GeneratedProtocolMessageType( "ClassificationAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Contains annotation details specific to classification. - + { + "DESCRIPTOR": _CLASSIFICATIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Contains annotation details specific to classification. Attributes: score: @@ -772,19 +812,17 @@ an annotation, the score is 0 for negative or 1 for positive. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationAnnotation) - ), + }, ) _sym_db.RegisterMessage(ClassificationAnnotation) VideoClassificationAnnotation = _reflection.GeneratedProtocolMessageType( "VideoClassificationAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCLASSIFICATIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Contains annotation details specific to video - classification. - + { + "DESCRIPTOR": _VIDEOCLASSIFICATIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Contains annotation details specific to video classification. Attributes: type: @@ -817,22 +855,21 @@ annotation applies. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationAnnotation) - ), + }, ) _sym_db.RegisterMessage(VideoClassificationAnnotation) ClassificationEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "ClassificationEvaluationMetrics", (_message.Message,), - dict( - ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType( + { + "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( "ConfidenceMetricsEntry", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Metrics for a single confidence threshold. - + { + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Metrics for a single confidence threshold. Attributes: confidence_threshold: @@ -843,7 +880,7 @@ Output only. Metrics are computed with an assumption that the model always returns at most this many predictions (ordered by their score, descendingly), but they all still need to meet - the confidence\_threshold. + the confidence_threshold. recall: Output only. Recall (True Positive Rate) for the given confidence threshold. @@ -867,11 +904,11 @@ label that has the highest prediction score and not below the confidence threshold for each example. f1_score_at1: - Output only. The harmonic mean of [recall\_at1][google.cloud.a - utoml.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetric - sEntry.recall\_at1] and [precision\_at1][google.cloud.automl.v - 1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry. - precision\_at1]. + Output only. The harmonic mean of [recall_at1][google.cloud.au + toml.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetrics + Entry.recall_at1] and [precision_at1][google.cloud.automl.v1be + ta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry.pre + cision_at1]. true_positive_count: Output only. The number of model created labels that match a ground truth label. @@ -887,50 +924,48 @@ label. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfidenceMetricsEntry) - ), + }, ), - ConfusionMatrix=_reflection.GeneratedProtocolMessageType( + "ConfusionMatrix": _reflection.GeneratedProtocolMessageType( "ConfusionMatrix", (_message.Message,), - dict( - Row=_reflection.GeneratedProtocolMessageType( + { + "Row": _reflection.GeneratedProtocolMessageType( "Row", (_message.Message,), - dict( - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Output only. A row in the confusion matrix. - + { + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX_ROW, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Output only. A row in the confusion matrix. Attributes: example_count: Output only. Value of the specific cell in the confusion - matrix. The number of values each row has (i.e. the length of + matrix. The number of values each row has (i.e. the length of the row) is equal to the length of the ``annotation_spec_id`` field or, if that one is not populated, length of the [display - \_name][google.cloud.automl.v1beta1.ClassificationEvaluationMe - trics.ConfusionMatrix.display\_name] field. + _name][google.cloud.automl.v1beta1.ClassificationEvaluationMet + rics.ConfusionMatrix.display_name] field. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix.Row) - ), + }, ), - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Confusion matrix of the model running the classification. - + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS_CONFUSIONMATRIX, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Confusion matrix of the model running the classification. Attributes: annotation_spec_id: Output only. IDs of the annotation specs used in the confusion - matrix. For Tables CLASSIFICATION [prediction\_type][google.c - loud.automl.v1beta1.TablesModelMetadata.prediction\_type] only - list of [annotation\_spec\_display\_name-s][] is populated. + matrix. For Tables CLASSIFICATION [prediction_type][google.cl + oud.automl.v1beta1.TablesModelMetadata.prediction_type] only + list of [annotation_spec_display_name-s][] is populated. display_name: Output only. Display name of the annotation specs used in the confusion matrix, as they were at the moment of the - evaluation. For Tables CLASSIFICATION [prediction\_type-s][go - ogle.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type - ], distinct values of the target column at the moment of the + evaluation. For Tables CLASSIFICATION [prediction_type-s][goo + gle.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], + distinct values of the target column at the moment of the model evaluation are populated here. row: Output only. Rows in the confusion matrix. The number of rows @@ -941,14 +976,13 @@ evaluated. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix) - ), + }, ), - DESCRIPTOR=_CLASSIFICATIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.classification_pb2", - __doc__="""Model evaluation metrics for classification problems. - Note: For Video Classification this metrics only describe quality of the - Video Classification predictions of "segment\_classification" type. - + "DESCRIPTOR": _CLASSIFICATIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.classification_pb2", + "__doc__": """Model evaluation metrics for classification problems. Note: For Video + Classification this metrics only describe quality of the Video + Classification predictions of “segment_classification” type. Attributes: au_prc: @@ -964,12 +998,12 @@ log_loss: Output only. The Log Loss metric. confidence_metrics_entry: - Output only. Metrics for each confidence\_threshold in - 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and - position\_threshold = INT32\_MAX\_VALUE. ROC and precision- - recall curves, and other aggregated metrics are derived from - them. The confidence metrics entries may also be supplied for - additional values of position\_threshold, but from these no + Output only. Metrics for each confidence_threshold in + 0.00,0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and + position_threshold = INT32_MAX_VALUE. ROC and precision-recall + curves, and other aggregated metrics are derived from them. + The confidence metrics entries may also be supplied for + additional values of position_threshold, but from these no aggregated metrics are computed. confusion_matrix: Output only. Confusion matrix of the evaluation. Only set for @@ -980,7 +1014,7 @@ Output only. The annotation spec ids used for this evaluation. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ClassificationEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(ClassificationEvaluationMetrics) _sym_db.RegisterMessage(ClassificationEvaluationMetrics.ConfidenceMetricsEntry) diff --git a/google/cloud/automl_v1beta1/proto/column_spec.proto b/google/cloud/automl_v1beta1/proto/column_spec.proto index b8f437f7..03389b8a 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec.proto +++ b/google/cloud/automl_v1beta1/proto/column_spec.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/data_stats.proto"; import "google/cloud/automl/v1beta1/data_types.proto"; import "google/api/annotations.proto"; @@ -32,6 +32,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Used by: // * Tables message ColumnSpec { + option (google.api.resource) = { + type: "automl.googleapis.com/ColumnSpec" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}" + }; + // Identifies the table's column, and its correlation with the column this // ColumnSpec describes. message CorrelatedColumn { diff --git a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py index 844bc058..b32f9826 100644 --- a/google/cloud/automl_v1beta1/proto/column_spec_pb2.py +++ b/google/cloud/automl_v1beta1/proto/column_spec_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/column_spec.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( data_stats_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2, ) @@ -28,13 +26,11 @@ name="google/cloud/automl_v1beta1/proto/column_spec.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n3google/cloud/automl_v1beta1/proto/column_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_types.proto\x1a\x1cgoogle/api/annotations.proto"\x84\x03\n\nColumnSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\tdata_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\ndata_stats\x18\x04 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats\x12X\n\x16top_correlated_columns\x18\x05 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t\x1at\n\x10\x43orrelatedColumn\x12\x16\n\x0e\x63olumn_spec_id\x18\x01 \x01(\t\x12H\n\x11\x63orrelation_stats\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStatsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/column_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_types.proto\x1a\x1cgoogle/api/annotations.proto"\x9b\x04\n\nColumnSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\tdata_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType\x12\x14\n\x0c\x64isplay_name\x18\x03 \x01(\t\x12:\n\ndata_stats\x18\x04 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats\x12X\n\x16top_correlated_columns\x18\x05 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t\x1at\n\x10\x43orrelatedColumn\x12\x16\n\x0e\x63olumn_spec_id\x18\x01 \x01(\t\x12H\n\x11\x63orrelation_stats\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x94\x01\xea\x41\x90\x01\n 
automl.googleapis.com/ColumnSpec\x12lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__types__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -48,6 +44,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec_id", @@ -58,7 +55,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -66,6 +63,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="correlation_stats", @@ -84,6 +82,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -94,8 +93,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=491, - serialized_end=607, + serialized_start=518, + serialized_end=634, ) _COLUMNSPEC = _descriptor.Descriptor( @@ -104,6 +103,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -114,7 +114,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -122,6 +122,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="data_type", @@ -140,6 +141,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -150,7 +152,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -158,6 +160,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="data_stats", @@ -176,6 +179,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="top_correlated_columns", @@ -194,6 +198,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="etag", @@ -204,7 +209,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -212,18 +217,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_COLUMNSPEC_CORRELATEDCOLUMN], + nested_types=[_COLUMNSPEC_CORRELATEDCOLUMN,], enum_types=[], - serialized_options=None, + 
serialized_options=b"\352A\220\001\n automl.googleapis.com/ColumnSpec\022lprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=219, - serialized_end=607, + serialized_start=246, + serialized_end=785, ) _COLUMNSPEC_CORRELATEDCOLUMN.fields_by_name[ @@ -251,33 +257,31 @@ ColumnSpec = _reflection.GeneratedProtocolMessageType( "ColumnSpec", (_message.Message,), - dict( - CorrelatedColumn=_reflection.GeneratedProtocolMessageType( + { + "CorrelatedColumn": _reflection.GeneratedProtocolMessageType( "CorrelatedColumn", (_message.Message,), - dict( - DESCRIPTOR=_COLUMNSPEC_CORRELATEDCOLUMN, - __module__="google.cloud.automl_v1beta1.proto.column_spec_pb2", - __doc__="""Identifies the table's column, and its correlation with - the column this ColumnSpec describes. - + { + "DESCRIPTOR": _COLUMNSPEC_CORRELATEDCOLUMN, + "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", + "__doc__": """Identifies the table’s column, and its correlation with the column + this ColumnSpec describes. Attributes: column_spec_id: - The column\_spec\_id of the correlated column, which belongs - to the same table as the in-context column. + The column_spec_id of the correlated column, which belongs to + the same table as the in-context column. correlation_stats: Correlation between this and the in-context column. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ColumnSpec.CorrelatedColumn) - ), + }, ), - DESCRIPTOR=_COLUMNSPEC, - __module__="google.cloud.automl_v1beta1.proto.column_spec_pb2", - __doc__="""A representation of a column in a relational table. When - listing them, column specs are returned in the same order in which they - were given on import . Used by: \* Tables - + "DESCRIPTOR": _COLUMNSPEC, + "__module__": "google.cloud.automl_v1beta1.proto.column_spec_pb2", + "__doc__": """A representation of a column in a relational table. When listing them, + column specs are returned in the same order in which they were given + on import . Used by: \* Tables Attributes: name: @@ -290,25 +294,26 @@ Output only. The name of the column to show in the interface. The name can be up to 100 characters long and can consist only of ASCII Latin letters A-Z and a-z, ASCII digits 0-9, - underscores(\_), and forward slashes(/), and must start with a + underscores(_), and forward slashes(/), and must start with a letter or a digit. data_stats: Output only. Stats of the series of values in the column. This - field may be stale, see the ancestor's - Dataset.tables\_dataset\_metadata.stats\_update\_time field - for the timestamp at which these stats were last updated. + field may be stale, see the ancestor’s + Dataset.tables_dataset_metadata.stats_update_time field for + the timestamp at which these stats were last updated. top_correlated_columns: Deprecated. etag: Used to perform consistent read-modify-write updates. If not - set, a blind "overwrite" update happens. + set, a blind “overwrite” update happens. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ColumnSpec) - ), + }, ) _sym_db.RegisterMessage(ColumnSpec) _sym_db.RegisterMessage(ColumnSpec.CorrelatedColumn) DESCRIPTOR._options = None +_COLUMNSPEC._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/data_items.proto b/google/cloud/automl_v1beta1/proto/data_items.proto index 424a0c64..9b9187ad 100644 --- a/google/cloud/automl_v1beta1/proto/data_items.proto +++ b/google/cloud/automl_v1beta1/proto/data_items.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,19 +11,19 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/geometry.proto"; import "google/cloud/automl/v1beta1/io.proto"; +import "google/cloud/automl/v1beta1/temporal.proto"; import "google/cloud/automl/v1beta1/text_segment.proto"; import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/struct.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -35,11 +35,9 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Only images up to 30MB in size are supported. message Image { // Input only. The data representing the image. - // For Predict calls - // [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, - // as other options are not currently supported by prediction API. You can - // read the contents of an uploaded image by using the - // [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field. + // For Predict calls [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, as other options are not + // currently supported by prediction API. You can read the contents of an + // uploaded image by using the [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field. oneof data { // Image content represented as a stream of bytes. // Note: As with all `bytes` fields, protobuffers use a pure binary @@ -60,11 +58,9 @@ message TextSnippet { // characters long. string content = 1; - // Optional. The format of - // [content][google.cloud.automl.v1beta1.TextSnippet.content]. Currently the - // only two allowed values are "text/html" and "text/plain". If left blank, - // the format is automatically determined from the type of the uploaded - // [content][google.cloud.automl.v1beta1.TextSnippet.content]. + // Optional. The format of [content][google.cloud.automl.v1beta1.TextSnippet.content]. Currently the only two allowed + // values are "text/html" and "text/plain". If left blank, the format is + // automatically determined from the type of the uploaded [content][google.cloud.automl.v1beta1.TextSnippet.content]. string mime_type = 2; // Output only. HTTP URI where you can download the content. @@ -100,9 +96,7 @@ message DocumentDimensions { // A structured text document e.g. a PDF. message Document { - // Describes the layout information of a - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in - // the document. 
+ // Describes the layout information of a [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the document. message Layout { // The type of TextSegment in the context of the original document. enum TextSegmentType { @@ -151,14 +145,12 @@ message Document { // [document_text][google.cloud.automl.v1beta1.Document.document_text]. TextSegment text_segment = 1; - // Page number of the - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - // in the original document, starts from 1. + // Page number of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the original document, starts + // from 1. int32 page_number = 2; - // The position of the - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - // in the page. Contains exactly 4 + // The position of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the page. + // Contains exactly 4 // // [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly.normalized_vertices] // and they are connected by edges in the order provided, which will @@ -168,9 +160,7 @@ message Document { // Coordinates are based on top-left as point (0,0). BoundingPoly bounding_poly = 3; - // The type of the - // [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] - // in document. + // The type of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in document. TextSegmentType text_segment_type = 4; } diff --git a/google/cloud/automl_v1beta1/proto/data_items_pb2.py b/google/cloud/automl_v1beta1/proto/data_items_pb2.py index c76bcf28..303eb85c 100644 --- a/google/cloud/automl_v1beta1/proto/data_items_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_items_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/data_items.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,39 +12,40 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( geometry_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2, ) from google.cloud.automl_v1beta1.proto import ( io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, ) +from google.cloud.automl_v1beta1.proto import ( + temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, +) from google.cloud.automl_v1beta1.proto import ( text_segment_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2, ) from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/data_items.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/data_items.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto"\x7f\n\x05Image\x12\x15\n\x0bimage_bytes\x18\x01 \x01(\x0cH\x00\x12@\n\x0cinput_config\x18\x06 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigH\x00\x12\x15\n\rthumbnail_uri\x18\x04 \x01(\tB\x06\n\x04\x64\x61ta"F\n\x0bTextSnippet\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x12\x13\n\x0b\x63ontent_uri\x18\x04 \x01(\t"\xef\x01\n\x12\x44ocumentDimensions\x12S\n\x04unit\x18\x01 \x01(\x0e\x32\x45.google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit\x12\r\n\x05width\x18\x02 \x01(\x02\x12\x0e\n\x06height\x18\x03 \x01(\x02"e\n\x15\x44ocumentDimensionUnit\x12\'\n#DOCUMENT_DIMENSION_UNIT_UNSPECIFIED\x10\x00\x12\x08\n\x04INCH\x10\x01\x12\x0e\n\nCENTIMETER\x10\x02\x12\t\n\x05POINT\x10\x03"\xf9\x05\n\x08\x44ocument\x12\x46\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.DocumentInputConfig\x12?\n\rdocument_text\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippet\x12<\n\x06layout\x18\x03 \x03(\x0b\x32,.google.cloud.automl.v1beta1.Document.Layout\x12L\n\x13\x64ocument_dimensions\x18\x04 \x01(\x0b\x32/.google.cloud.automl.v1beta1.DocumentDimensions\x12\x12\n\npage_count\x18\x05 \x01(\x05\x1a\xc3\x03\n\x06Layout\x12>\n\x0ctext_segment\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegment\x12\x13\n\x0bpage_number\x18\x02 \x01(\x05\x12@\n\rbounding_poly\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12W\n\x11text_segment_type\x18\x04 
\x01(\x0e\x32<.google.cloud.automl.v1beta1.Document.Layout.TextSegmentType"\xc8\x01\n\x0fTextSegmentType\x12!\n\x1dTEXT_SEGMENT_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05TOKEN\x10\x01\x12\r\n\tPARAGRAPH\x10\x02\x12\x0e\n\nFORM_FIELD\x10\x03\x12\x13\n\x0f\x46ORM_FIELD_NAME\x10\x04\x12\x17\n\x13\x46ORM_FIELD_CONTENTS\x10\x05\x12\t\n\x05TABLE\x10\x06\x12\x10\n\x0cTABLE_HEADER\x10\x07\x12\r\n\tTABLE_ROW\x10\x08\x12\x0e\n\nTABLE_CELL\x10\t"F\n\x03Row\x12\x17\n\x0f\x63olumn_spec_ids\x18\x02 \x03(\t\x12&\n\x06values\x18\x03 \x03(\x0b\x32\x16.google.protobuf.Value"\xfe\x01\n\x0e\x45xamplePayload\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32".google.cloud.automl.v1beta1.ImageH\x00\x12@\n\x0ctext_snippet\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetH\x00\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DocumentH\x00\x12/\n\x03row\x18\x03 \x01(\x0b\x32 .google.cloud.automl.v1beta1.RowH\x00\x42\t\n\x07payloadB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_items.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x19google/protobuf/any.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1cgoogle/api/annotations.proto"\x7f\n\x05Image\x12\x15\n\x0bimage_bytes\x18\x01 \x01(\x0cH\x00\x12@\n\x0cinput_config\x18\x06 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigH\x00\x12\x15\n\rthumbnail_uri\x18\x04 \x01(\tB\x06\n\x04\x64\x61ta"F\n\x0bTextSnippet\x12\x0f\n\x07\x63ontent\x18\x01 \x01(\t\x12\x11\n\tmime_type\x18\x02 \x01(\t\x12\x13\n\x0b\x63ontent_uri\x18\x04 \x01(\t"\xef\x01\n\x12\x44ocumentDimensions\x12S\n\x04unit\x18\x01 \x01(\x0e\x32\x45.google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit\x12\r\n\x05width\x18\x02 \x01(\x02\x12\x0e\n\x06height\x18\x03 \x01(\x02"e\n\x15\x44ocumentDimensionUnit\x12\'\n#DOCUMENT_DIMENSION_UNIT_UNSPECIFIED\x10\x00\x12\x08\n\x04INCH\x10\x01\x12\x0e\n\nCENTIMETER\x10\x02\x12\t\n\x05POINT\x10\x03"\xf9\x05\n\x08\x44ocument\x12\x46\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.DocumentInputConfig\x12?\n\rdocument_text\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippet\x12<\n\x06layout\x18\x03 \x03(\x0b\x32,.google.cloud.automl.v1beta1.Document.Layout\x12L\n\x13\x64ocument_dimensions\x18\x04 \x01(\x0b\x32/.google.cloud.automl.v1beta1.DocumentDimensions\x12\x12\n\npage_count\x18\x05 \x01(\x05\x1a\xc3\x03\n\x06Layout\x12>\n\x0ctext_segment\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegment\x12\x13\n\x0bpage_number\x18\x02 \x01(\x05\x12@\n\rbounding_poly\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12W\n\x11text_segment_type\x18\x04 
\x01(\x0e\x32<.google.cloud.automl.v1beta1.Document.Layout.TextSegmentType"\xc8\x01\n\x0fTextSegmentType\x12!\n\x1dTEXT_SEGMENT_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05TOKEN\x10\x01\x12\r\n\tPARAGRAPH\x10\x02\x12\x0e\n\nFORM_FIELD\x10\x03\x12\x13\n\x0f\x46ORM_FIELD_NAME\x10\x04\x12\x17\n\x13\x46ORM_FIELD_CONTENTS\x10\x05\x12\t\n\x05TABLE\x10\x06\x12\x10\n\x0cTABLE_HEADER\x10\x07\x12\r\n\tTABLE_ROW\x10\x08\x12\x0e\n\nTABLE_CELL\x10\t"F\n\x03Row\x12\x17\n\x0f\x63olumn_spec_ids\x18\x02 \x03(\t\x12&\n\x06values\x18\x03 \x03(\x0b\x32\x16.google.protobuf.Value"\xfe\x01\n\x0e\x45xamplePayload\x12\x33\n\x05image\x18\x01 \x01(\x0b\x32".google.cloud.automl.v1beta1.ImageH\x00\x12@\n\x0ctext_snippet\x18\x02 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetH\x00\x12\x39\n\x08\x64ocument\x18\x04 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DocumentH\x00\x12/\n\x03row\x18\x03 \x01(\x0b\x32 .google.cloud.automl.v1beta1.RowH\x00\x42\t\n\x07payloadB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, google_dot_protobuf_dot_any__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -57,6 +55,7 @@ full_name="google.cloud.automl.v1beta1.DocumentDimensions.DocumentDimensionUnit", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="DOCUMENT_DIMENSION_UNIT_UNSPECIFIED", @@ -64,21 +63,37 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="INCH", index=1, number=1, serialized_options=None, type=None + name="INCH", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CENTIMETER", index=2, number=2, serialized_options=None, type=None + name="CENTIMETER", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="POINT", index=3, number=3, serialized_options=None, type=None + name="POINT", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=690, - serialized_end=791, + serialized_start=740, + serialized_end=841, ) _sym_db.RegisterEnumDescriptor(_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT) @@ -87,6 +102,7 @@ full_name="google.cloud.automl.v1beta1.Document.Layout.TextSegmentType", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TEXT_SEGMENT_TYPE_UNSPECIFIED", @@ -94,15 +110,31 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TOKEN", index=1, number=1, serialized_options=None, type=None + name="TOKEN", + index=1, + number=1, + 
serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="PARAGRAPH", index=2, number=2, serialized_options=None, type=None + name="PARAGRAPH", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FORM_FIELD", index=3, number=3, serialized_options=None, type=None + name="FORM_FIELD", + index=3, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="FORM_FIELD_NAME", @@ -110,6 +142,7 @@ number=4, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( name="FORM_FIELD_CONTENTS", @@ -117,24 +150,45 @@ number=5, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE", index=6, number=6, serialized_options=None, type=None + name="TABLE", + index=6, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE_HEADER", index=7, number=7, serialized_options=None, type=None + name="TABLE_HEADER", + index=7, + number=7, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE_ROW", index=8, number=8, serialized_options=None, type=None + name="TABLE_ROW", + index=8, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TABLE_CELL", index=9, number=9, serialized_options=None, type=None + name="TABLE_CELL", + index=9, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=1355, - serialized_end=1555, + serialized_start=1405, + serialized_end=1605, ) _sym_db.RegisterEnumDescriptor(_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE) @@ -145,6 +199,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="image_bytes", @@ -155,7 +210,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b(""), + default_value=b"", message_type=None, enum_type=None, containing_type=None, @@ -163,6 +218,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_config", @@ -181,6 +237,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="thumbnail_uri", @@ -191,7 +248,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -199,6 +256,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -214,11 +272,12 @@ full_name="google.cloud.automl.v1beta1.Image.data", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=350, - serialized_end=477, + serialized_start=400, + serialized_end=527, ) @@ -228,6 +287,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, 
fields=[ _descriptor.FieldDescriptor( name="content", @@ -238,7 +298,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -246,6 +306,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mime_type", @@ -256,7 +317,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -264,6 +325,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="content_uri", @@ -274,7 +336,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -282,6 +344,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -292,8 +355,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=479, - serialized_end=549, + serialized_start=529, + serialized_end=599, ) @@ -303,6 +366,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="unit", @@ -321,6 +385,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="width", @@ -339,6 +404,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="height", @@ -357,18 +423,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], - enum_types=[_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT], + enum_types=[_DOCUMENTDIMENSIONS_DOCUMENTDIMENSIONUNIT,], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=552, - serialized_end=791, + serialized_start=602, + serialized_end=841, ) @@ -378,6 +445,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text_segment", @@ -396,6 +464,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_number", @@ -414,6 +483,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_poly", @@ -432,6 +502,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_segment_type", @@ -450,18 +521,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], - enum_types=[_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE], + enum_types=[_DOCUMENT_LAYOUT_TEXTSEGMENTTYPE,], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1104, - serialized_end=1555, + serialized_start=1154, + serialized_end=1605, ) _DOCUMENT = 
_descriptor.Descriptor( @@ -470,6 +542,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_config", @@ -488,6 +561,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document_text", @@ -506,6 +580,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="layout", @@ -524,6 +599,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document_dimensions", @@ -542,6 +618,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_count", @@ -560,18 +637,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_DOCUMENT_LAYOUT], + nested_types=[_DOCUMENT_LAYOUT,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=794, - serialized_end=1555, + serialized_start=844, + serialized_end=1605, ) @@ -581,6 +659,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec_ids", @@ -599,6 +678,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="values", @@ -617,6 +697,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -627,8 +708,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1557, - serialized_end=1627, + serialized_start=1607, + serialized_end=1677, ) @@ -638,6 +719,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="image", @@ -656,6 +738,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_snippet", @@ -674,6 +757,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="document", @@ -692,6 +776,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row", @@ -710,6 +795,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -725,11 +811,12 @@ full_name="google.cloud.automl.v1beta1.ExamplePayload.payload", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=1630, - serialized_end=1884, + serialized_start=1680, + serialized_end=1934, ) _IMAGE.fields_by_name[ @@ -806,21 +893,20 @@ Image = _reflection.GeneratedProtocolMessageType( "Image", (_message.Message,), - dict( - DESCRIPTOR=_IMAGE, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A representation of an image. Only images up to 30MB in - size are supported. 
- + { + "DESCRIPTOR": _IMAGE, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A representation of an image. Only images up to 30MB in size are + supported. Attributes: data: Input only. The data representing the image. For Predict calls - [image\_bytes][google.cloud.automl.v1beta1.Image.image\_bytes] + [image_bytes][google.cloud.automl.v1beta1.Image.image_bytes] must be set, as other options are not currently supported by prediction API. You can read the contents of an uploaded image by using the - [content\_uri][google.cloud.automl.v1beta1.Image.content\_uri] + [content_uri][google.cloud.automl.v1beta1.Image.content_uri] field. image_bytes: Image content represented as a stream of bytes. Note: As with @@ -832,18 +918,17 @@ Output only. HTTP URI to the thumbnail image. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Image) - ), + }, ) _sym_db.RegisterMessage(Image) TextSnippet = _reflection.GeneratedProtocolMessageType( "TextSnippet", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSNIPPET, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A representation of a text snippet. - + { + "DESCRIPTOR": _TEXTSNIPPET, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A representation of a text snippet. Attributes: content: @@ -852,26 +937,25 @@ mime_type: Optional. The format of [content][google.cloud.automl.v1beta1.TextSnippet.content]. - Currently the only two allowed values are "text/html" and - "text/plain". If left blank, the format is automatically + Currently the only two allowed values are “text/html” and + “text/plain”. If left blank, the format is automatically determined from the type of the uploaded [content][google.cloud.automl.v1beta1.TextSnippet.content]. content_uri: Output only. HTTP URI where you can download the content. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSnippet) - ), + }, ) _sym_db.RegisterMessage(TextSnippet) DocumentDimensions = _reflection.GeneratedProtocolMessageType( "DocumentDimensions", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTDIMENSIONS, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""Message that describes dimension of a document. - + { + "DESCRIPTOR": _DOCUMENTDIMENSIONS, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """Message that describes dimension of a document. Attributes: unit: @@ -882,53 +966,50 @@ Height value of the document, works together with the unit. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DocumentDimensions) - ), + }, ) _sym_db.RegisterMessage(DocumentDimensions) Document = _reflection.GeneratedProtocolMessageType( "Document", (_message.Message,), - dict( - Layout=_reflection.GeneratedProtocolMessageType( + { + "Layout": _reflection.GeneratedProtocolMessageType( "Layout", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENT_LAYOUT, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""Describes the layout information of a - [text\_segment][google.cloud.automl.v1beta1.Document.Layout.text\_segment] - in the document. - + { + "DESCRIPTOR": _DOCUMENT_LAYOUT, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """Describes the layout information of a [text_segment][google.cloud.auto + ml.v1beta1.Document.Layout.text_segment] in the document. 
Attributes: text_segment: - Text Segment that represents a segment in [document\_text][goo - gle.cloud.automl.v1beta1.Document.document\_text]. + Text Segment that represents a segment in [document_text][goog + le.cloud.automl.v1beta1.Document.document_text]. page_number: - Page number of the [text\_segment][google.cloud.automl.v1beta1 - .Document.Layout.text\_segment] in the original document, - starts from 1. + Page number of the [text_segment][google.cloud.automl.v1beta1. + Document.Layout.text_segment] in the original document, starts + from 1. bounding_poly: - The position of the [text\_segment][google.cloud.automl.v1beta - 1.Document.Layout.text\_segment] in the page. Contains exactly - 4 [normalized\_vertices][google.cloud.automl.v1beta1.Bounding - Poly.normalized\_vertices] and they are connected by edges in - the order provided, which will represent a rectangle parallel - to the frame. The [NormalizedVertex-s][google.cloud.automl.v1b - eta1.NormalizedVertex] are relative to the page. Coordinates - are based on top-left as point (0,0). + The position of the [text_segment][google.cloud.automl.v1beta1 + .Document.Layout.text_segment] in the page. Contains exactly 4 + [normalized_vertices][google.cloud.automl.v1beta1.BoundingPoly + .normalized_vertices] and they are connected by edges in the + order provided, which will represent a rectangle parallel to + the frame. The [NormalizedVertex-s][google.cloud.automl.v1beta + 1.NormalizedVertex] are relative to the page. Coordinates are + based on top-left as point (0,0). text_segment_type: - The type of the [text\_segment][google.cloud.automl.v1beta1.Do - cument.Layout.text\_segment] in document. + The type of the [text_segment][google.cloud.automl.v1beta1.Doc + ument.Layout.text_segment] in document. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Document.Layout) - ), + }, ), - DESCRIPTOR=_DOCUMENT, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A structured text document e.g. a PDF. - + "DESCRIPTOR": _DOCUMENT, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A structured text document e.g. a PDF. Attributes: input_config: @@ -937,14 +1018,14 @@ The plain text version of this document. layout: Describes the layout of the document. Sorted by - [page\_number][]. + [page_number][]. document_dimensions: The dimensions of the page in the document. page_count: Number of pages in the document. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Document) - ), + }, ) _sym_db.RegisterMessage(Document) _sym_db.RegisterMessage(Document.Layout) @@ -952,41 +1033,39 @@ Row = _reflection.GeneratedProtocolMessageType( "Row", (_message.Message,), - dict( - DESCRIPTOR=_ROW, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""A representation of a row in a relational table. - + { + "DESCRIPTOR": _ROW, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """A representation of a row in a relational table. Attributes: column_spec_ids: The resource IDs of the column specs describing the columns of the row. If set must contain, but possibly in a different - order, all input feature [column\_spec\_ids][google.cloud.aut - oml.v1beta1.TablesModelMetadata.input\_feature\_column\_specs] - of the Model this row is being passed to. 
Note: The below + order, all input feature [column_spec_ids][google.cloud.autom + l.v1beta1.TablesModelMetadata.input_feature_column_specs] of + the Model this row is being passed to. Note: The below ``values`` field must match order of this field, if this field is set. values: Required. The values of the row cells, given in the same order - as the column\_spec\_ids, or, if not set, then in the same - order as input feature [column\_specs][google.cloud.automl.v1 - beta1.TablesModelMetadata.input\_feature\_column\_specs] of - the Model this row is being passed to. + as the column_spec_ids, or, if not set, then in the same order + as input feature [column_specs][google.cloud.automl.v1beta1.T + ablesModelMetadata.input_feature_column_specs] of the Model + this row is being passed to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Row) - ), + }, ) _sym_db.RegisterMessage(Row) ExamplePayload = _reflection.GeneratedProtocolMessageType( "ExamplePayload", (_message.Message,), - dict( - DESCRIPTOR=_EXAMPLEPAYLOAD, - __module__="google.cloud.automl_v1beta1.proto.data_items_pb2", - __doc__="""Example data used for training or prediction. - + { + "DESCRIPTOR": _EXAMPLEPAYLOAD, + "__module__": "google.cloud.automl_v1beta1.proto.data_items_pb2", + "__doc__": """Example data used for training or prediction. Attributes: payload: @@ -1001,7 +1080,7 @@ Example relational table row. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExamplePayload) - ), + }, ) _sym_db.RegisterMessage(ExamplePayload) diff --git a/google/cloud/automl_v1beta1/proto/data_stats.proto b/google/cloud/automl_v1beta1/proto/data_stats.proto index 5d941a5b..c13a5d45 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats.proto +++ b/google/cloud/automl_v1beta1/proto/data_stats.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py index 85f18cee..dc31756b 100644 --- a/google/cloud/automl_v1beta1/proto/data_stats_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_stats_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/data_stats.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,10 @@ name="google/cloud/automl_v1beta1/proto/data_stats.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/data_stats.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfd\x03\n\tDataStats\x12\x42\n\rfloat64_stats\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.Float64StatsH\x00\x12@\n\x0cstring_stats\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StringStatsH\x00\x12\x46\n\x0ftimestamp_stats\x18\x05 \x01(\x0b\x32+.google.cloud.automl.v1beta1.TimestampStatsH\x00\x12>\n\x0b\x61rray_stats\x18\x06 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ArrayStatsH\x00\x12@\n\x0cstruct_stats\x18\x07 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StructStatsH\x00\x12\x44\n\x0e\x63\x61tegory_stats\x18\x08 \x01(\x0b\x32*.google.cloud.automl.v1beta1.CategoryStatsH\x00\x12\x1c\n\x14\x64istinct_value_count\x18\x01 \x01(\x03\x12\x18\n\x10null_value_count\x18\x02 \x01(\x03\x12\x19\n\x11valid_value_count\x18\t \x01(\x03\x42\x07\n\x05stats"\xdd\x01\n\x0c\x46loat64Stats\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x1a\n\x12standard_deviation\x18\x02 \x01(\x01\x12\x11\n\tquantiles\x18\x03 \x03(\x01\x12T\n\x11histogram_buckets\x18\x04 \x03(\x0b\x32\x39.google.cloud.automl.v1beta1.Float64Stats.HistogramBucket\x1a:\n\x0fHistogramBucket\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\r\n\x05\x63ount\x18\x03 \x01(\x03"\x8d\x01\n\x0bStringStats\x12P\n\x11top_unigram_stats\x18\x01 \x03(\x0b\x32\x35.google.cloud.automl.v1beta1.StringStats.UnigramStats\x1a,\n\x0cUnigramStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"\xf4\x02\n\x0eTimestampStats\x12V\n\x0egranular_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry\x1a\x98\x01\n\rGranularStats\x12W\n\x07\x62uckets\x18\x01 \x03(\x0b\x32\x46.google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry\x1a.\n\x0c\x42ucketsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1ao\n\x12GranularStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12H\n\x05value\x18\x02 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TimestampStats.GranularStats:\x02\x38\x01"J\n\nArrayStats\x12<\n\x0cmember_stats\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats"\xb7\x01\n\x0bStructStats\x12M\n\x0b\x66ield_stats\x18\x01 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.StructStats.FieldStatsEntry\x1aY\n\x0f\x46ieldStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats:\x02\x38\x01"\xa0\x01\n\rCategoryStats\x12Z\n\x12top_category_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats\x1a\x33\n\x13SingleCategoryStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"%\n\x10\x43orrelationStats\x12\x11\n\tcramers_v\x18\x01 
\x01(\x01\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_stats.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfd\x03\n\tDataStats\x12\x42\n\rfloat64_stats\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.Float64StatsH\x00\x12@\n\x0cstring_stats\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StringStatsH\x00\x12\x46\n\x0ftimestamp_stats\x18\x05 \x01(\x0b\x32+.google.cloud.automl.v1beta1.TimestampStatsH\x00\x12>\n\x0b\x61rray_stats\x18\x06 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ArrayStatsH\x00\x12@\n\x0cstruct_stats\x18\x07 \x01(\x0b\x32(.google.cloud.automl.v1beta1.StructStatsH\x00\x12\x44\n\x0e\x63\x61tegory_stats\x18\x08 \x01(\x0b\x32*.google.cloud.automl.v1beta1.CategoryStatsH\x00\x12\x1c\n\x14\x64istinct_value_count\x18\x01 \x01(\x03\x12\x18\n\x10null_value_count\x18\x02 \x01(\x03\x12\x19\n\x11valid_value_count\x18\t \x01(\x03\x42\x07\n\x05stats"\xdd\x01\n\x0c\x46loat64Stats\x12\x0c\n\x04mean\x18\x01 \x01(\x01\x12\x1a\n\x12standard_deviation\x18\x02 \x01(\x01\x12\x11\n\tquantiles\x18\x03 \x03(\x01\x12T\n\x11histogram_buckets\x18\x04 \x03(\x0b\x32\x39.google.cloud.automl.v1beta1.Float64Stats.HistogramBucket\x1a:\n\x0fHistogramBucket\x12\x0b\n\x03min\x18\x01 \x01(\x01\x12\x0b\n\x03max\x18\x02 \x01(\x01\x12\r\n\x05\x63ount\x18\x03 \x01(\x03"\x8d\x01\n\x0bStringStats\x12P\n\x11top_unigram_stats\x18\x01 \x03(\x0b\x32\x35.google.cloud.automl.v1beta1.StringStats.UnigramStats\x1a,\n\x0cUnigramStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"\xf4\x02\n\x0eTimestampStats\x12V\n\x0egranular_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry\x1a\x98\x01\n\rGranularStats\x12W\n\x07\x62uckets\x18\x01 \x03(\x0b\x32\x46.google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry\x1a.\n\x0c\x42ucketsEntry\x12\x0b\n\x03key\x18\x01 \x01(\x05\x12\r\n\x05value\x18\x02 \x01(\x03:\x02\x38\x01\x1ao\n\x12GranularStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12H\n\x05value\x18\x02 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TimestampStats.GranularStats:\x02\x38\x01"J\n\nArrayStats\x12<\n\x0cmember_stats\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats"\xb7\x01\n\x0bStructStats\x12M\n\x0b\x66ield_stats\x18\x01 \x03(\x0b\x32\x38.google.cloud.automl.v1beta1.StructStats.FieldStatsEntry\x1aY\n\x0f\x46ieldStatsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x35\n\x05value\x18\x02 \x01(\x0b\x32&.google.cloud.automl.v1beta1.DataStats:\x02\x38\x01"\xa0\x01\n\rCategoryStats\x12Z\n\x12top_category_stats\x18\x01 \x03(\x0b\x32>.google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats\x1a\x33\n\x13SingleCategoryStats\x12\r\n\x05value\x18\x01 \x01(\t\x12\r\n\x05\x63ount\x18\x02 \x01(\x03"%\n\x10\x43orrelationStats\x12\x11\n\tcramers_v\x18\x01 
\x01(\x01\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -38,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="float64_stats", @@ -56,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="string_stats", @@ -74,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="timestamp_stats", @@ -92,6 +89,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="array_stats", @@ -110,6 +108,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="struct_stats", @@ -128,6 +127,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="category_stats", @@ -146,6 +146,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="distinct_value_count", @@ -164,6 +165,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="null_value_count", @@ -182,6 +184,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="valid_value_count", @@ -200,6 +203,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -215,8 +219,9 @@ full_name="google.cloud.automl.v1beta1.DataStats.stats", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=114, serialized_end=623, @@ -229,6 +234,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="min", @@ -247,6 +253,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="max", @@ -265,6 +272,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="count", @@ -283,6 +291,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -303,6 +312,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="mean", @@ -321,6 +331,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="standard_deviation", @@ -339,6 +350,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="quantiles", 
@@ -357,6 +369,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="histogram_buckets", @@ -375,10 +388,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_FLOAT64STATS_HISTOGRAMBUCKET], + nested_types=[_FLOAT64STATS_HISTOGRAMBUCKET,], enum_types=[], serialized_options=None, is_extendable=False, @@ -396,6 +410,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="value", @@ -406,7 +421,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -414,6 +429,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="count", @@ -432,6 +448,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -452,6 +469,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="top_unigram_stats", @@ -470,10 +488,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_STRINGSTATS_UNIGRAMSTATS], + nested_types=[_STRINGSTATS_UNIGRAMSTATS,], enum_types=[], serialized_options=None, is_extendable=False, @@ -491,6 +510,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -509,6 +529,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -527,12 +548,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -547,6 +569,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="buckets", @@ -565,10 +588,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY], + nested_types=[_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -585,6 +609,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -595,7 +620,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -603,6 +628,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -621,12 +647,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -641,6 +668,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="granular_stats", @@ -659,10 +687,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_TIMESTAMPSTATS_GRANULARSTATS, _TIMESTAMPSTATS_GRANULARSTATSENTRY], + nested_types=[_TIMESTAMPSTATS_GRANULARSTATS, _TIMESTAMPSTATS_GRANULARSTATSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -680,6 +709,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="member_stats", @@ -698,7 +728,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -719,6 +750,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -729,7 +761,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -737,6 +769,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -755,12 +788,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -775,6 +809,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="field_stats", @@ -793,10 +828,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_STRUCTSTATS_FIELDSTATSENTRY], + nested_types=[_STRUCTSTATS_FIELDSTATSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -814,6 +850,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="value", @@ -824,7 +861,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -832,6 +869,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="count", @@ -850,6 +888,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -870,6 +909,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="top_category_stats", @@ -888,10 +928,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - 
nested_types=[_CATEGORYSTATS_SINGLECATEGORYSTATS], + nested_types=[_CATEGORYSTATS_SINGLECATEGORYSTATS,], enum_types=[], serialized_options=None, is_extendable=False, @@ -909,6 +950,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="cramers_v", @@ -927,7 +969,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -1026,12 +1069,11 @@ DataStats = _reflection.GeneratedProtocolMessageType( "DataStats", (_message.Message,), - dict( - DESCRIPTOR=_DATASTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of values that share the - same DataType. - + { + "DESCRIPTOR": _DATASTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of values that share the same + DataType. Attributes: stats: @@ -1056,40 +1098,38 @@ The number of values that are valid. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DataStats) - ), + }, ) _sym_db.RegisterMessage(DataStats) Float64Stats = _reflection.GeneratedProtocolMessageType( "Float64Stats", (_message.Message,), - dict( - HistogramBucket=_reflection.GeneratedProtocolMessageType( + { + "HistogramBucket": _reflection.GeneratedProtocolMessageType( "HistogramBucket", (_message.Message,), - dict( - DESCRIPTOR=_FLOAT64STATS_HISTOGRAMBUCKET, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""A bucket of a histogram. - + { + "DESCRIPTOR": _FLOAT64STATS_HISTOGRAMBUCKET, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """A bucket of a histogram. Attributes: min: The minimum value of the bucket, inclusive. max: The maximum value of the bucket, exclusive unless max = - ``"Infinity"``, in which case it's inclusive. + ``"Infinity"``, in which case it’s inclusive. count: - The number of data values that are in the bucket, i.e. are + The number of data values that are in the bucket, i.e. are between min and max values. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Float64Stats.HistogramBucket) - ), + }, ), - DESCRIPTOR=_FLOAT64STATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of FLOAT64 values. - + "DESCRIPTOR": _FLOAT64STATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of FLOAT64 values. Attributes: mean: @@ -1098,7 +1138,7 @@ The standard deviation of the series. quantiles: Ordered from 0 to k k-quantile values of the data series of n - values. The value at index i is, approximately, the i\*n/k-th + values. The value at index i is, approximately, the i*n/k-th smallest value in the series; for i = 0 and i = k these are, respectively, the min and max values. histogram_buckets: @@ -1110,7 +1150,7 @@ ``"Infinity"``. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Float64Stats) - ), + }, ) _sym_db.RegisterMessage(Float64Stats) _sym_db.RegisterMessage(Float64Stats.HistogramBucket) @@ -1118,15 +1158,14 @@ StringStats = _reflection.GeneratedProtocolMessageType( "StringStats", (_message.Message,), - dict( - UnigramStats=_reflection.GeneratedProtocolMessageType( + { + "UnigramStats": _reflection.GeneratedProtocolMessageType( "UnigramStats", (_message.Message,), - dict( - DESCRIPTOR=_STRINGSTATS_UNIGRAMSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The statistics of a unigram. - + { + "DESCRIPTOR": _STRINGSTATS_UNIGRAMSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The statistics of a unigram. Attributes: value: @@ -1135,12 +1174,11 @@ The number of occurrences of this unigram in the series. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StringStats.UnigramStats) - ), + }, ), - DESCRIPTOR=_STRINGSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of STRING values. - + "DESCRIPTOR": _STRINGSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of STRING values. Attributes: top_unigram_stats: @@ -1148,7 +1186,7 @@ le.cloud.automl.v1beta1.StringStats.UnigramStats.count]. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StringStats) - ), + }, ) _sym_db.RegisterMessage(StringStats) _sym_db.RegisterMessage(StringStats.UnigramStats) @@ -1156,58 +1194,56 @@ TimestampStats = _reflection.GeneratedProtocolMessageType( "TimestampStats", (_message.Message,), - dict( - GranularStats=_reflection.GeneratedProtocolMessageType( + { + "GranularStats": _reflection.GeneratedProtocolMessageType( "GranularStats", (_message.Message,), - dict( - BucketsEntry=_reflection.GeneratedProtocolMessageType( + { + "BucketsEntry": _reflection.GeneratedProtocolMessageType( "BucketsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2" + { + "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS_BUCKETSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStats.BucketsEntry) - ), + }, ), - DESCRIPTOR=_TIMESTAMPSTATS_GRANULARSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""Stats split by a defined in context granularity. - + "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """Stats split by a defined in context granularity. Attributes: buckets: A map from granularity key to example count for that key. E.g. - for hour\_of\_day ``13`` means 1pm, or for month\_of\_year - ``5`` means May). + for hour_of_day ``13`` means 1pm, or for month_of_year ``5`` + means May). 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStats) - ), + }, ), - GranularStatsEntry=_reflection.GeneratedProtocolMessageType( + "GranularStatsEntry": _reflection.GeneratedProtocolMessageType( "GranularStatsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TIMESTAMPSTATS_GRANULARSTATSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2" + { + "DESCRIPTOR": _TIMESTAMPSTATS_GRANULARSTATSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats.GranularStatsEntry) - ), + }, ), - DESCRIPTOR=_TIMESTAMPSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of TIMESTAMP values. - + "DESCRIPTOR": _TIMESTAMPSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of TIMESTAMP values. Attributes: granular_stats: The string key is the pre-defined granularity. Currently - supported: hour\_of\_day, day\_of\_week, month\_of\_year. + supported: hour_of_day, day_of_week, month_of_year. Granularities finer that the granularity of timestamp data are - not populated (e.g. if timestamps are at day granularity, then - hour\_of\_day is not populated). + not populated (e.g. if timestamps are at day granularity, then + hour_of_day is not populated). """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimestampStats) - ), + }, ) _sym_db.RegisterMessage(TimestampStats) _sym_db.RegisterMessage(TimestampStats.GranularStats) @@ -1217,11 +1253,10 @@ ArrayStats = _reflection.GeneratedProtocolMessageType( "ArrayStats", (_message.Message,), - dict( - DESCRIPTOR=_ARRAYSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of ARRAY values. - + { + "DESCRIPTOR": _ARRAYSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of ARRAY values. Attributes: member_stats: @@ -1230,27 +1265,26 @@ type of the array. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ArrayStats) - ), + }, ) _sym_db.RegisterMessage(ArrayStats) StructStats = _reflection.GeneratedProtocolMessageType( "StructStats", (_message.Message,), - dict( - FieldStatsEntry=_reflection.GeneratedProtocolMessageType( + { + "FieldStatsEntry": _reflection.GeneratedProtocolMessageType( "FieldStatsEntry", (_message.Message,), - dict( - DESCRIPTOR=_STRUCTSTATS_FIELDSTATSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2" + { + "DESCRIPTOR": _STRUCTSTATS_FIELDSTATSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructStats.FieldStatsEntry) - ), + }, ), - DESCRIPTOR=_STRUCTSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of STRUCT values. - + "DESCRIPTOR": _STRUCTSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of STRUCT values. Attributes: field_stats: @@ -1258,7 +1292,7 @@ over series of all data in that field across all the structs. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructStats) - ), + }, ) _sym_db.RegisterMessage(StructStats) _sym_db.RegisterMessage(StructStats.FieldStatsEntry) @@ -1266,15 +1300,14 @@ CategoryStats = _reflection.GeneratedProtocolMessageType( "CategoryStats", (_message.Message,), - dict( - SingleCategoryStats=_reflection.GeneratedProtocolMessageType( + { + "SingleCategoryStats": _reflection.GeneratedProtocolMessageType( "SingleCategoryStats", (_message.Message,), - dict( - DESCRIPTOR=_CATEGORYSTATS_SINGLECATEGORYSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The statistics of a single CATEGORY value. - + { + "DESCRIPTOR": _CATEGORYSTATS_SINGLECATEGORYSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The statistics of a single CATEGORY value. Attributes: value: @@ -1283,12 +1316,11 @@ The number of occurrences of this value in the series. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats) - ), + }, ), - DESCRIPTOR=_CATEGORYSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""The data statistics of a series of CATEGORY values. - + "DESCRIPTOR": _CATEGORYSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """The data statistics of a series of CATEGORY values. Attributes: top_category_stats: @@ -1297,7 +1329,7 @@ ats.count]. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CategoryStats) - ), + }, ) _sym_db.RegisterMessage(CategoryStats) _sym_db.RegisterMessage(CategoryStats.SingleCategoryStats) @@ -1305,20 +1337,19 @@ CorrelationStats = _reflection.GeneratedProtocolMessageType( "CorrelationStats", (_message.Message,), - dict( - DESCRIPTOR=_CORRELATIONSTATS, - __module__="google.cloud.automl_v1beta1.proto.data_stats_pb2", - __doc__="""A correlation statistics between two series of DataType - values. The series may have differing DataType-s, but within a single - series the DataType must be the same. - + { + "DESCRIPTOR": _CORRELATIONSTATS, + "__module__": "google.cloud.automl_v1beta1.proto.data_stats_pb2", + "__doc__": """A correlation statistics between two series of DataType values. The + series may have differing DataType-s, but within a single series the + DataType must be the same. Attributes: cramers_v: - The correlation value using the Cramer's V measure. + The correlation value using the Cramer’s V measure. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CorrelationStats) - ), + }, ) _sym_db.RegisterMessage(CorrelationStats) diff --git a/google/cloud/automl_v1beta1/proto/data_types.proto b/google/cloud/automl_v1beta1/proto/data_types.proto index 086e96e3..6f77f56b 100644 --- a/google/cloud/automl_v1beta1/proto/data_types.proto +++ b/google/cloud/automl_v1beta1/proto/data_types.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/data_types_pb2.py b/google/cloud/automl_v1beta1/proto/data_types_pb2.py index 96121059..cb1993a8 100644 --- a/google/cloud/automl_v1beta1/proto/data_types_pb2.py +++ b/google/cloud/automl_v1beta1/proto/data_types_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/data_types.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -23,13 +20,10 @@ name="google/cloud/automl_v1beta1/proto/data_types.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/data_types.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfc\x01\n\x08\x44\x61taType\x12\x42\n\x11list_element_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataTypeH\x00\x12>\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.StructTypeH\x00\x12\x15\n\x0btime_format\x18\x05 \x01(\tH\x00\x12\x38\n\ttype_code\x18\x01 \x01(\x0e\x32%.google.cloud.automl.v1beta1.TypeCode\x12\x10\n\x08nullable\x18\x04 \x01(\x08\x42\t\n\x07\x64\x65tails"\xa7\x01\n\nStructType\x12\x43\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x33.google.cloud.automl.v1beta1.StructType.FieldsEntry\x1aT\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType:\x02\x38\x01*r\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0c\n\x08\x43\x41TEGORY\x10\nB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/data_types.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\xfc\x01\n\x08\x44\x61taType\x12\x42\n\x11list_element_type\x18\x02 \x01(\x0b\x32%.google.cloud.automl.v1beta1.DataTypeH\x00\x12>\n\x0bstruct_type\x18\x03 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.StructTypeH\x00\x12\x15\n\x0btime_format\x18\x05 \x01(\tH\x00\x12\x38\n\ttype_code\x18\x01 \x01(\x0e\x32%.google.cloud.automl.v1beta1.TypeCode\x12\x10\n\x08nullable\x18\x04 \x01(\x08\x42\t\n\x07\x64\x65tails"\xa7\x01\n\nStructType\x12\x43\n\x06\x66ields\x18\x01 \x03(\x0b\x32\x33.google.cloud.automl.v1beta1.StructType.FieldsEntry\x1aT\n\x0b\x46ieldsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x34\n\x05value\x18\x02 
\x01(\x0b\x32%.google.cloud.automl.v1beta1.DataType:\x02\x38\x01*r\n\x08TypeCode\x12\x19\n\x15TYPE_CODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07\x46LOAT64\x10\x03\x12\r\n\tTIMESTAMP\x10\x04\x12\n\n\x06STRING\x10\x06\x12\t\n\x05\x41RRAY\x10\x08\x12\n\n\x06STRUCT\x10\t\x12\x0c\n\x08\x43\x41TEGORY\x10\nB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) _TYPECODE = _descriptor.EnumDescriptor( @@ -37,6 +31,7 @@ full_name="google.cloud.automl.v1beta1.TypeCode", filename=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="TYPE_CODE_UNSPECIFIED", @@ -44,24 +39,55 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="FLOAT64", index=1, number=3, serialized_options=None, type=None + name="FLOAT64", + index=1, + number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="TIMESTAMP", index=2, number=4, serialized_options=None, type=None + name="TIMESTAMP", + index=2, + number=4, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="STRING", index=3, number=6, serialized_options=None, type=None + name="STRING", + index=3, + number=6, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="ARRAY", index=4, number=8, serialized_options=None, type=None + name="ARRAY", + index=4, + number=8, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="STRUCT", index=5, number=9, serialized_options=None, type=None + name="STRUCT", + index=5, + number=9, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="CATEGORY", index=6, number=10, serialized_options=None, type=None + name="CATEGORY", + index=6, + number=10, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, @@ -87,6 +113,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="list_element_type", @@ -105,6 +132,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="struct_type", @@ -123,6 +151,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_format", @@ -133,7 +162,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -141,6 +170,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="type_code", @@ -159,6 +189,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="nullable", @@ -177,6 +208,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -192,8 +224,9 @@ full_name="google.cloud.automl.v1beta1.DataType.details", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=114, serialized_end=366, @@ -206,6 +239,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -216,7 +250,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -224,6 +258,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -242,12 +277,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -262,6 +298,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="fields", @@ -280,10 +317,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_STRUCTTYPE_FIELDSENTRY], + nested_types=[_STRUCTTYPE_FIELDSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -326,39 +364,35 @@ DataType = _reflection.GeneratedProtocolMessageType( "DataType", (_message.Message,), - dict( - DESCRIPTOR=_DATATYPE, - __module__="google.cloud.automl_v1beta1.proto.data_types_pb2", - __doc__="""Indicated the type of data that can be stored in a - structured data entity (e.g. a table). - + { + "DESCRIPTOR": _DATATYPE, + "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", + "__doc__": """Indicated the type of data that can be stored in a structured data + entity (e.g. a table). Attributes: details: Details of DataType-s that need additional specification. list_element_type: - If - [type\_code][google.cloud.automl.v1beta1.DataType.type\_code] + If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then ``list_element_type`` is the type of the elements. struct_type: - If - [type\_code][google.cloud.automl.v1beta1.DataType.type\_code] + If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], then - ``struct_type`` provides type information for the struct's + ``struct_type`` provides type information for the struct’s fields. time_format: - If - [type\_code][google.cloud.automl.v1beta1.DataType.type\_code] + If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [TIMESTAMP][google.cloud.automl.v1beta1.TypeCode.TIMESTAMP] then ``time_format`` provides the format in which that time - field is expressed. The time\_format must either be one of: \* + field is expressed. The time_format must either be one of: \* ``UNIX_SECONDS`` \* ``UNIX_MILLISECONDS`` \* ``UNIX_MICROSECONDS`` \* ``UNIX_NANOSECONDS`` (for respectively number of seconds, milliseconds, microseconds and nanoseconds since start of the Unix epoch); or be written in - ``strftime`` syntax. 
If time\_format is not set, then the - default format as described on the type\_code is used. + ``strftime`` syntax. If time_format is not set, then the + default format as described on the type_code is used. type_code: Required. The [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this type. @@ -367,29 +401,28 @@ ``NULL`` value is expressed as an empty string. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DataType) - ), + }, ) _sym_db.RegisterMessage(DataType) StructType = _reflection.GeneratedProtocolMessageType( "StructType", (_message.Message,), - dict( - FieldsEntry=_reflection.GeneratedProtocolMessageType( + { + "FieldsEntry": _reflection.GeneratedProtocolMessageType( "FieldsEntry", (_message.Message,), - dict( - DESCRIPTOR=_STRUCTTYPE_FIELDSENTRY, - __module__="google.cloud.automl_v1beta1.proto.data_types_pb2" + { + "DESCRIPTOR": _STRUCTTYPE_FIELDSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructType.FieldsEntry) - ), + }, ), - DESCRIPTOR=_STRUCTTYPE, - __module__="google.cloud.automl_v1beta1.proto.data_types_pb2", - __doc__="""\ ``StructType`` defines the DataType-s of a + "DESCRIPTOR": _STRUCTTYPE, + "__module__": "google.cloud.automl_v1beta1.proto.data_types_pb2", + "__doc__": """\ ``StructType`` defines the DataType-s of a [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. - Attributes: fields: Unordered map of struct field names to their data types. @@ -397,7 +430,7 @@ data types are still mutable. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.StructType) - ), + }, ) _sym_db.RegisterMessage(StructType) _sym_db.RegisterMessage(StructType.FieldsEntry) diff --git a/google/cloud/automl_v1beta1/proto/dataset.proto b/google/cloud/automl_v1beta1/proto/dataset.proto index e07b1784..8d1b8d93 100644 --- a/google/cloud/automl_v1beta1/proto/dataset.proto +++ b/google/cloud/automl_v1beta1/proto/dataset.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/image.proto"; import "google/cloud/automl/v1beta1/tables.proto"; import "google/cloud/automl/v1beta1/text.proto"; @@ -34,6 +34,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // A workspace for solving a single, particular machine learning (ML) problem. // A workspace contains examples that may be annotated. message Dataset { + option (google.api.resource) = { + type: "automl.googleapis.com/Dataset" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}" + }; + // Required. // The dataset metadata that is specific to the problem type. oneof dataset_metadata { diff --git a/google/cloud/automl_v1beta1/proto/dataset_pb2.py b/google/cloud/automl_v1beta1/proto/dataset_pb2.py index fa8b2153..28aa5238 100644 --- a/google/cloud/automl_v1beta1/proto/dataset_pb2.py +++ b/google/cloud/automl_v1beta1/proto/dataset_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/dataset.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, ) @@ -38,13 +36,11 @@ name="google/cloud/automl_v1beta1/proto/dataset.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - "\n/google/cloud/automl_v1beta1/proto/dataset.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto\"\xee\x08\n\x07\x44\x61taset\x12_\n\x1ctranslation_dataset_metadata\x18\x17 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TranslationDatasetMetadataH\x00\x12p\n%image_classification_dataset_metadata\x18\x18 \x01(\x0b\x32?.google.cloud.automl.v1beta1.ImageClassificationDatasetMetadataH\x00\x12n\n$text_classification_dataset_metadata\x18\x19 \x01(\x0b\x32>.google.cloud.automl.v1beta1.TextClassificationDatasetMetadataH\x00\x12s\n'image_object_detection_dataset_metadata\x18\x1a \x01(\x0b\x32@.google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadataH\x00\x12p\n%video_classification_dataset_metadata\x18\x1f \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoClassificationDatasetMetadataH\x00\x12q\n&video_object_tracking_dataset_metadata\x18\x1d \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadataH\x00\x12\x66\n text_extraction_dataset_metadata\x18\x1c \x01(\x0b\x32:.google.cloud.automl.v1beta1.TextExtractionDatasetMetadataH\x00\x12\x64\n\x1ftext_sentiment_dataset_metadata\x18\x1e \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TextSentimentDatasetMetadataH\x00\x12U\n\x17tables_dataset_metadata\x18! 
\x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesDatasetMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x15\n\rexample_count\x18\x15 \x01(\x05\x12/\n\x0b\x63reate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04\x65tag\x18\x11 \x01(\tB\x12\n\x10\x64\x61taset_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3" - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b"\n/google/cloud/automl_v1beta1/proto/dataset.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto\"\xce\t\n\x07\x44\x61taset\x12_\n\x1ctranslation_dataset_metadata\x18\x17 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TranslationDatasetMetadataH\x00\x12p\n%image_classification_dataset_metadata\x18\x18 \x01(\x0b\x32?.google.cloud.automl.v1beta1.ImageClassificationDatasetMetadataH\x00\x12n\n$text_classification_dataset_metadata\x18\x19 \x01(\x0b\x32>.google.cloud.automl.v1beta1.TextClassificationDatasetMetadataH\x00\x12s\n'image_object_detection_dataset_metadata\x18\x1a \x01(\x0b\x32@.google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadataH\x00\x12p\n%video_classification_dataset_metadata\x18\x1f \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoClassificationDatasetMetadataH\x00\x12q\n&video_object_tracking_dataset_metadata\x18\x1d \x01(\x0b\x32?.google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadataH\x00\x12\x66\n text_extraction_dataset_metadata\x18\x1c \x01(\x0b\x32:.google.cloud.automl.v1beta1.TextExtractionDatasetMetadataH\x00\x12\x64\n\x1ftext_sentiment_dataset_metadata\x18\x1e \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TextSentimentDatasetMetadataH\x00\x12U\n\x17tables_dataset_metadata\x18! 
\x01(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesDatasetMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\t\x12\x15\n\rexample_count\x18\x15 \x01(\x05\x12/\n\x0b\x63reate_time\x18\x0e \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04\x65tag\x18\x11 \x01(\t:^\xea\x41[\n\x1d\x61utoml.googleapis.com/Dataset\x12:projects/{project}/locations/{location}/datasets/{dataset}B\x12\n\x10\x64\x61taset_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3", dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2.DESCRIPTOR, @@ -62,6 +58,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translation_dataset_metadata", @@ -80,6 +77,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_classification_dataset_metadata", @@ -98,6 +96,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_classification_dataset_metadata", @@ -116,6 +115,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection_dataset_metadata", @@ -134,6 +134,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_classification_dataset_metadata", @@ -152,6 +153,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_object_tracking_dataset_metadata", @@ -170,6 +172,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction_dataset_metadata", @@ -188,6 +191,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment_dataset_metadata", @@ -206,6 +210,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_dataset_metadata", @@ -224,6 +229,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -234,7 +240,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -242,6 +248,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -252,7 +259,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), 
message_type=None, enum_type=None, containing_type=None, @@ -260,6 +267,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="description", @@ -270,7 +278,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -278,6 +286,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="example_count", @@ -296,6 +305,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -314,6 +324,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="etag", @@ -324,7 +335,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -332,12 +343,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + serialized_options=b"\352A[\n\035automl.googleapis.com/Dataset\022:projects/{project}/locations/{location}/datasets/{dataset}", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -347,11 +359,12 @@ full_name="google.cloud.automl.v1beta1.Dataset.dataset_metadata", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=385, - serialized_end=1519, + serialized_start=412, + serialized_end=1642, ) _DATASET.fields_by_name[ @@ -462,13 +475,11 @@ Dataset = _reflection.GeneratedProtocolMessageType( "Dataset", (_message.Message,), - dict( - DESCRIPTOR=_DATASET, - __module__="google.cloud.automl_v1beta1.proto.dataset_pb2", - __doc__="""A workspace for solving a single, particular machine - learning (ML) problem. A workspace contains examples that may be - annotated. - + { + "DESCRIPTOR": _DATASET, + "__module__": "google.cloud.automl_v1beta1.proto.dataset_pb2", + "__doc__": """A workspace for solving a single, particular machine learning (ML) + problem. A workspace contains examples that may be annotated. Attributes: dataset_metadata: @@ -498,8 +509,8 @@ display_name: Required. The name of the dataset to show in the interface. The name can be up to 32 characters long and can consist only - of ASCII Latin letters A-Z and a-z, underscores (\_), and - ASCII digits 0-9. + of ASCII Latin letters A-Z and a-z, underscores (_), and ASCII + digits 0-9. description: User-provided description of the dataset. The description can be up to 25000 characters long. @@ -509,13 +520,14 @@ Output only. Timestamp when this dataset was created. etag: Used to perform consistent read-modify-write updates. If not - set, a blind "overwrite" update happens. + set, a blind “overwrite” update happens. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Dataset) - ), + }, ) _sym_db.RegisterMessage(Dataset) DESCRIPTOR._options = None +_DATASET._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/detection.proto b/google/cloud/automl_v1beta1/proto/detection.proto index 99761fd5..c5864e20 100644 --- a/google/cloud/automl_v1beta1/proto/detection.proto +++ b/google/cloud/automl_v1beta1/proto/detection.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/detection_pb2.py b/google/cloud/automl_v1beta1/proto/detection_pb2.py index ab328c84..940fac4d 100644 --- a/google/cloud/automl_v1beta1/proto/detection_pb2.py +++ b/google/cloud/automl_v1beta1/proto/detection_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/detection.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -26,12 +23,9 @@ name="google/cloud/automl_v1beta1/proto/detection.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n1google/cloud/automl_v1beta1/proto/detection.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"p\n\x1eImageObjectDetectionAnnotation\x12?\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x02 \x01(\x02"\xb4\x01\n\x1dVideoObjectTrackingAnnotation\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12?\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x04 \x01(\x02"\xae\x02\n\x17\x42oundingBoxMetricsEntry\x12\x15\n\riou_threshold\x18\x01 \x01(\x02\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12o\n\x1a\x63onfidence_metrics_entries\x18\x03 \x03(\x0b\x32K.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02"\xd6\x01\n%ImageObjectDetectionEvaluationMetrics\x12$\n\x1c\x65valuated_bounding_box_count\x18\x01 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x03 \x01(\x02"\xf4\x01\n$VideoObjectTrackingEvaluationMetrics\x12\x1d\n\x15\x65valuated_frame_count\x18\x01 \x01(\x05\x12$\n\x1c\x65valuated_bounding_box_count\x18\x02 
\x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x04 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x06 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n1google/cloud/automl_v1beta1/proto/detection.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x30google/cloud/automl_v1beta1/proto/geometry.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"p\n\x1eImageObjectDetectionAnnotation\x12?\n\x0c\x62ounding_box\x18\x01 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x02 \x01(\x02"\xb4\x01\n\x1dVideoObjectTrackingAnnotation\x12\x13\n\x0binstance_id\x18\x01 \x01(\t\x12.\n\x0btime_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12?\n\x0c\x62ounding_box\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.BoundingPoly\x12\r\n\x05score\x18\x04 \x01(\x02"\xae\x02\n\x17\x42oundingBoxMetricsEntry\x12\x15\n\riou_threshold\x18\x01 \x01(\x02\x12\x1e\n\x16mean_average_precision\x18\x02 \x01(\x02\x12o\n\x1a\x63onfidence_metrics_entries\x18\x03 \x03(\x0b\x32K.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x11\n\tprecision\x18\x03 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x04 \x01(\x02"\xd6\x01\n%ImageObjectDetectionEvaluationMetrics\x12$\n\x1c\x65valuated_bounding_box_count\x18\x01 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x03 \x01(\x02"\xf4\x01\n$VideoObjectTrackingEvaluationMetrics\x12\x1d\n\x15\x65valuated_frame_count\x18\x01 \x01(\x05\x12$\n\x1c\x65valuated_bounding_box_count\x18\x02 \x01(\x05\x12Z\n\x1c\x62ounding_box_metrics_entries\x18\x04 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.BoundingBoxMetricsEntry\x12+\n#bounding_box_mean_average_precision\x18\x06 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_geometry__pb2.DESCRIPTOR, google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, @@ -46,6 +40,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bounding_box", @@ -64,6 +59,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -82,6 +78,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -103,6 +100,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="instance_id", @@ -113,7 +111,7 @@ 
cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -121,6 +119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_offset", @@ -139,6 +138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box", @@ -157,6 +157,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -175,6 +176,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -196,6 +198,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="confidence_threshold", @@ -214,6 +217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -232,6 +236,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision", @@ -250,6 +255,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -268,6 +274,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -288,6 +295,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="iou_threshold", @@ -306,6 +314,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mean_average_precision", @@ -324,6 +333,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence_metrics_entries", @@ -342,10 +352,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY], + nested_types=[_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -363,6 +374,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="evaluated_bounding_box_count", @@ -381,6 +393,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_metrics_entries", @@ -399,6 +412,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_mean_average_precision", @@ -417,6 +431,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -438,6 +453,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, 
fields=[ _descriptor.FieldDescriptor( name="evaluated_frame_count", @@ -456,6 +472,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="evaluated_bounding_box_count", @@ -474,6 +491,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_metrics_entries", @@ -492,6 +510,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bounding_box_mean_average_precision", @@ -510,6 +529,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -567,11 +587,10 @@ ImageObjectDetectionAnnotation = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Annotation details for image object detection. - + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Annotation details for image object detection. Attributes: bounding_box: @@ -582,58 +601,57 @@ positivity confidence. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionAnnotation) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionAnnotation) VideoObjectTrackingAnnotation = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Annotation details for video object tracking. - + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Annotation details for video object tracking. Attributes: instance_id: Optional. The instance of the object, expressed as a positive - integer. Used to tell apart objects of the same type (i.e. - AnnotationSpec) when multiple are present on a single example. - NOTE: Instance ID prediction quality is not a part of model - evaluation and is done as best effort. Especially in cases - when an entity goes off-screen for a longer time (minutes), - when it comes back it may be given a new instance ID. + integer. Used to tell apart objects of the same type + (i.e. AnnotationSpec) when multiple are present on a single + example. NOTE: Instance ID prediction quality is not a part of + model evaluation and is done as best effort. Especially in + cases when an entity goes off-screen for a longer time + (minutes), when it comes back it may be given a new instance + ID. time_offset: Required. A time (frame) of a video to which this annotation - pertains. Represented as the duration since the video's start. + pertains. Represented as the duration since the video’s start. bounding_box: Required. The rectangle representing the object location on - the frame (i.e. at the time\_offset of the video). + the frame ( i.e. at the time_offset of the video). score: Output only. The confidence that this annotation is positive - for the video at the time\_offset, value in [0, 1], higher + for the video at the time_offset, value in [0, 1], higher means higher positivity confidence. 
For annotations created by the user the score is 1. When user approves an annotation, the original float score is kept (and not changed to 1). """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingAnnotation) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingAnnotation) BoundingBoxMetricsEntry = _reflection.GeneratedProtocolMessageType( "BoundingBoxMetricsEntry", (_message.Message,), - dict( - ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType( + { + "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( "ConfidenceMetricsEntry", (_message.Message,), - dict( - DESCRIPTOR=_BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Metrics for a single confidence threshold. - + { + "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY_CONFIDENCEMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Metrics for a single confidence threshold. Attributes: confidence_threshold: @@ -647,14 +665,12 @@ Output only. The harmonic mean of recall and precision. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingBoxMetricsEntry.ConfidenceMetricsEntry) - ), + }, ), - DESCRIPTOR=_BOUNDINGBOXMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Bounding box matching model metrics for a single - intersection-over-union threshold and multiple label match confidence - thresholds. - + "DESCRIPTOR": _BOUNDINGBOXMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Bounding box matching model metrics for a single intersection-over- + union threshold and multiple label match confidence thresholds. Attributes: iou_threshold: @@ -662,15 +678,14 @@ to compute this metrics entry. mean_average_precision: Output only. The mean average precision, most often close to - au\_prc. + au_prc. confidence_metrics_entries: - Output only. Metrics for each label-match - confidence\_threshold from - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve - is derived from them. + Output only. Metrics for each label-match confidence_threshold + from 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99. Precision-recall + curve is derived from them. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingBoxMetricsEntry) - ), + }, ) _sym_db.RegisterMessage(BoundingBoxMetricsEntry) _sym_db.RegisterMessage(BoundingBoxMetricsEntry.ConfidenceMetricsEntry) @@ -678,67 +693,62 @@ ImageObjectDetectionEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Model evaluation metrics for image object detection - problems. Evaluates prediction quality of labeled bounding boxes. - + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Model evaluation metrics for image object detection problems. + Evaluates prediction quality of labeled bounding boxes. Attributes: evaluated_bounding_box_count: - Output only. The total number of bounding boxes (i.e. summed + Output only. The total number of bounding boxes (i.e. summed over all images) the ground truth used to create this evaluation had. bounding_box_metrics_entries: Output only. 
The bounding boxes match metrics for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label - confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - pair. + 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and each label confidence + threshold 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 pair. bounding_box_mean_average_precision: Output only. The single metric for bounding boxes evaluation: - the mean\_average\_precision averaged over all - bounding\_box\_metrics\_entries. + the mean_average_precision averaged over all + bounding_box_metrics_entries. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionEvaluationMetrics) VideoObjectTrackingEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.detection_pb2", - __doc__="""Model evaluation metrics for video object tracking - problems. Evaluates prediction quality of both labeled bounding boxes - and labeled tracks (i.e. series of bounding boxes sharing same label and - instance ID). - + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.detection_pb2", + "__doc__": """Model evaluation metrics for video object tracking problems. Evaluates + prediction quality of both labeled bounding boxes and labeled tracks + (i.e. series of bounding boxes sharing same label and instance ID). Attributes: evaluated_frame_count: Output only. The number of video frames used to create this evaluation. evaluated_bounding_box_count: - Output only. The total number of bounding boxes (i.e. summed + Output only. The total number of bounding boxes (i.e. summed over all frames) the ground truth used to create this evaluation had. bounding_box_metrics_entries: Output only. The bounding boxes match metrics for each Intersection-over-union threshold - 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label - confidence threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 - pair. + 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 and each label confidence + threshold 0.05,0.10,…,0.95,0.96,0.97,0.98,0.99 pair. bounding_box_mean_average_precision: Output only. The single metric for bounding boxes evaluation: - the mean\_average\_precision averaged over all - bounding\_box\_metrics\_entries. + the mean_average_precision averaged over all + bounding_box_metrics_entries. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingEvaluationMetrics) diff --git a/google/cloud/automl_v1beta1/proto/geometry.proto b/google/cloud/automl_v1beta1/proto/geometry.proto index e5379ab1..d5654aac 100644 --- a/google/cloud/automl_v1beta1/proto/geometry.proto +++ b/google/cloud/automl_v1beta1/proto/geometry.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/geometry_pb2.py b/google/cloud/automl_v1beta1/proto/geometry_pb2.py index 324d76f5..2d355059 100644 --- a/google/cloud/automl_v1beta1/proto/geometry_pb2.py +++ b/google/cloud/automl_v1beta1/proto/geometry_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/geometry.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,10 @@ name="google/cloud/automl_v1beta1/proto/geometry.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n0google/cloud/automl_v1beta1/proto/geometry.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"Z\n\x0c\x42oundingPoly\x12J\n\x13normalized_vertices\x18\x02 \x03(\x0b\x32-.google.cloud.automl.v1beta1.NormalizedVertexB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/geometry.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"(\n\x10NormalizedVertex\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02"Z\n\x0c\x42oundingPoly\x12J\n\x13normalized_vertices\x18\x02 \x03(\x0b\x32-.google.cloud.automl.v1beta1.NormalizedVertexB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -38,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="x", @@ -56,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="y", @@ -74,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -95,6 +92,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="normalized_vertices", @@ -113,7 +111,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -135,38 +134,36 @@ NormalizedVertex = _reflection.GeneratedProtocolMessageType( "NormalizedVertex", (_message.Message,), - dict( - 
DESCRIPTOR=_NORMALIZEDVERTEX, - __module__="google.cloud.automl_v1beta1.proto.geometry_pb2", - __doc__="""Required. Horizontal coordinate. - + { + "DESCRIPTOR": _NORMALIZEDVERTEX, + "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", + "__doc__": """Required. Horizontal coordinate. Attributes: y: Required. Vertical coordinate. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.NormalizedVertex) - ), + }, ) _sym_db.RegisterMessage(NormalizedVertex) BoundingPoly = _reflection.GeneratedProtocolMessageType( "BoundingPoly", (_message.Message,), - dict( - DESCRIPTOR=_BOUNDINGPOLY, - __module__="google.cloud.automl_v1beta1.proto.geometry_pb2", - __doc__="""A bounding polygon of a detected object on a plane. On - output both vertices and normalized\_vertices are provided. The polygon - is formed by connecting vertices in the order they are listed. - + { + "DESCRIPTOR": _BOUNDINGPOLY, + "__module__": "google.cloud.automl_v1beta1.proto.geometry_pb2", + "__doc__": """A bounding polygon of a detected object on a plane. On output both + vertices and normalized_vertices are provided. The polygon is formed + by connecting vertices in the order they are listed. Attributes: normalized_vertices: Output only . The bounding polygon normalized vertices. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BoundingPoly) - ), + }, ) _sym_db.RegisterMessage(BoundingPoly) diff --git a/google/cloud/automl_v1beta1/proto/image.proto b/google/cloud/automl_v1beta1/proto/image.proto index 5995efc6..960eaeb0 100644 --- a/google/cloud/automl_v1beta1/proto/image.proto +++ b/google/cloud/automl_v1beta1/proto/image.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,16 +11,16 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/annotation_spec.proto"; import "google/cloud/automl/v1beta1/classification.proto"; import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -36,7 +36,9 @@ message ImageClassificationDatasetMetadata { } // Dataset metadata specific to image object detection. -message ImageObjectDetectionDatasetMetadata {} +message ImageObjectDetectionDatasetMetadata { + +} // Model metadata for image classification. message ImageClassificationModelMetadata { @@ -65,38 +67,34 @@ message ImageClassificationModelMetadata { // This is the default value. // * `mobile-low-latency-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile or edge device with TensorFlow - // afterwards. Expected to have low latency, but may have lower - // prediction quality than other models. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have low latency, but + // may have lower prediction quality than other models. 
// * `mobile-versatile-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile or edge device with TensorFlow - // afterwards. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. // * `mobile-high-accuracy-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile or edge device with TensorFlow - // afterwards. Expected to have a higher latency, but should - // also have a higher prediction quality than other models. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have a higher + // latency, but should also have a higher prediction quality + // than other models. // * `mobile-core-ml-low-latency-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile device with Core ML afterwards. Expected - // to have low latency, but may have lower prediction quality - // than other models. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core + // ML afterwards. Expected to have low latency, but may have + // lower prediction quality than other models. // * `mobile-core-ml-versatile-1` - A model that, in addition to providing // prediction via AutoML API, can also be exported (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile device with Core ML afterwards. + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with Core + // ML afterwards. // * `mobile-core-ml-high-accuracy-1` - A model that, in addition to // providing prediction via AutoML API, can also be exported - // (see - // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) - // and used on a mobile device with Core ML afterwards. Expected - // to have a higher latency, but should also have a higher - // prediction quality than other models. + // (see [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile device with + // Core ML afterwards. Expected to have a higher latency, but + // should also have a higher prediction quality than other + // models. string model_type = 7; // Output only. An approximate number of online prediction QPS that can @@ -119,6 +117,21 @@ message ImageObjectDetectionModelMetadata { // * `cloud-low-latency-1` - A model to be used via prediction // calls to AutoML API. Expected to have low latency, but may // have lower prediction quality than other models. + // * `mobile-low-latency-1` - A model that, in addition to providing + // prediction via AutoML API, can also be exported (see + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have low latency, but + // may have lower prediction quality than other models. 
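
The model_type values documented in image.proto above are plain strings on the generated metadata messages. A hedged sketch, not part of this patch, of selecting one of them when building ImageClassificationModelMetadata; the module path and field name come from the image_pb2.py descriptors in this patch, and the variable name is illustrative:

from google.cloud.automl_v1beta1.proto import image_pb2

# Request an exportable, low-latency model; "mobile-low-latency-1" is one of
# the model_type values documented in image.proto above.
classification_metadata = image_pb2.ImageClassificationModelMetadata(
    model_type="mobile-low-latency-1",
)

ImageObjectDetectionModelMetadata gains the same mobile-* documentation in this patch and its model_type field is set the same way.
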
+ // * `mobile-versatile-1` - A model that, in addition to providing + // prediction via AutoML API, can also be exported (see + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. + // * `mobile-high-accuracy-1` - A model that, in addition to providing + // prediction via AutoML API, can also be exported (see + // [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) and used on a mobile or edge device + // with TensorFlow afterwards. Expected to have a higher + // latency, but should also have a higher prediction quality + // than other models. string model_type = 1; // Output only. The number of nodes this model is deployed on. A node is an diff --git a/google/cloud/automl_v1beta1/proto/image_pb2.py b/google/cloud/automl_v1beta1/proto/image_pb2.py index 3a0a54a4..6f17f2c5 100644 --- a/google/cloud/automl_v1beta1/proto/image_pb2.py +++ b/google/cloud/automl_v1beta1/proto/image_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/image.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,7 +12,7 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( annotation_spec_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2, ) @@ -23,23 +20,22 @@ classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/image.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e \x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 
\x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nImageProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/image.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"r\n"ImageClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"%\n#ImageObjectDetectionDatasetMetadata"\xb2\x01\n ImageClassificationModelMetadata\x12\x15\n\rbase_model_id\x18\x01 \x01(\t\x12\x14\n\x0ctrain_budget\x18\x02 \x01(\x03\x12\x12\n\ntrain_cost\x18\x03 \x01(\x03\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12\x12\n\nmodel_type\x18\x07 \x01(\t\x12\x10\n\x08node_qps\x18\r \x01(\x01\x12\x12\n\nnode_count\x18\x0e \x01(\x03"\xbe\x01\n!ImageObjectDetectionModelMetadata\x12\x12\n\nmodel_type\x18\x01 \x01(\t\x12\x12\n\nnode_count\x18\x03 \x01(\x03\x12\x10\n\x08node_qps\x18\x04 \x01(\x01\x12\x13\n\x0bstop_reason\x18\x05 \x01(\t\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03"@\n*ImageClassificationModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03"A\n+ImageObjectDetectionModelDeploymentMetadata\x12\x12\n\nnode_count\x18\x01 \x01(\x03\x42\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nImageProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -50,6 +46,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_type", @@ -68,7 +65,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -78,8 +76,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=254, - serialized_end=368, + serialized_start=281, + serialized_end=395, ) @@ -89,6 +87,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -98,8 +97,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=370, - serialized_end=407, + serialized_start=397, + serialized_end=434, ) @@ -109,6 +108,7 @@ filename=None, file=DESCRIPTOR, 
containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="base_model_id", @@ -119,7 +119,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -127,6 +127,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_budget", @@ -145,6 +146,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_cost", @@ -163,6 +165,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stop_reason", @@ -173,7 +176,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -181,6 +184,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_type", @@ -191,7 +195,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -199,6 +203,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_qps", @@ -217,6 +222,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_count", @@ -235,6 +241,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -245,8 +252,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=410, - serialized_end=588, + serialized_start=437, + serialized_end=615, ) @@ -256,6 +263,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_type", @@ -266,7 +274,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -274,6 +282,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_count", @@ -292,6 +301,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="node_qps", @@ -310,6 +320,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stop_reason", @@ -320,7 +331,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -328,6 +339,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_budget_milli_node_hours", @@ -346,6 +358,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_cost_milli_node_hours", @@ -364,6 +377,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -374,8 +388,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=591, - serialized_end=781, + serialized_start=618, + serialized_end=808, ) @@ -385,6 +399,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="node_count", @@ -403,7 +418,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -413,8 +429,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=783, - serialized_end=847, + serialized_start=810, + serialized_end=874, ) @@ -424,6 +440,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="node_count", @@ -442,7 +459,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -452,8 +470,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=849, - serialized_end=914, + serialized_start=876, + serialized_end=941, ) _IMAGECLASSIFICATIONDATASETMETADATA.fields_by_name[ @@ -484,43 +502,39 @@ ImageClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "ImageClassificationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGECLASSIFICATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Dataset metadata that is specific to image classification. - + { + "DESCRIPTOR": _IMAGECLASSIFICATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Dataset metadata that is specific to image classification. Attributes: classification_type: Required. Type of the classification problem. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageClassificationDatasetMetadata) ImageObjectDetectionDatasetMetadata = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Dataset metadata specific to image object detection. - - """, + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Dataset metadata specific to image object detection.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionDatasetMetadata) ImageClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( "ImageClassificationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGECLASSIFICATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model metadata for image classification. - + { + "DESCRIPTOR": _IMAGECLASSIFICATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model metadata for image classification. 
Attributes: base_model_id: @@ -584,21 +598,20 @@ node_count: Output only. The number of nodes this model is deployed on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the node\_qps field. + online prediction QPS as given in the node_qps field. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageClassificationModelMetadata) ImageObjectDetectionModelMetadata = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model metadata specific to image object detection. - + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model metadata specific to image object detection. Attributes: model_type: @@ -609,11 +622,27 @@ other models. \* ``cloud-low-latency-1`` - A model to be used via prediction calls to AutoML API. Expected to have low latency, but may have lower prediction quality than other + models. \* ``mobile-low-latency-1`` - A model that, in + addition to providing prediction via AutoML API, can also be + exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1 + .AutoMl.ExportModel]) and used on a mobile or edge device with + TensorFlow afterwards. Expected to have low latency, but may + have lower prediction quality than other models. \* ``mobile- + versatile-1`` - A model that, in addition to providing + prediction via AutoML API, can also be exported (see [AutoMl.E + xportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]) + and used on a mobile or edge device with TensorFlow + afterwards. \* ``mobile-high-accuracy-1`` - A model that, in + addition to providing prediction via AutoML API, can also be + exported (see [AutoMl.ExportModel][google.cloud.automl.v1beta1 + .AutoMl.ExportModel]) and used on a mobile or edge device with + TensorFlow afterwards. Expected to have a higher latency, but + should also have a higher prediction quality than other models. node_count: Output only. The number of nodes this model is deployed on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the qps\_per\_node field. + online prediction QPS as given in the qps_per_node field. node_qps: Output only. An approximate number of online prediction QPS that can be supported by this model per each node on which it @@ -623,12 +652,12 @@ stopped, e.g. ``BUDGET_REACHED``, ``MODEL_CONVERGED``. train_budget_milli_node_hours: The train budget of creating this model, expressed in milli - node hours i.e. 1,000 value in this field means 1 node hour. + node hours i.e. 1,000 value in this field means 1 node hour. The actual ``train_cost`` will be equal or less than this value. If further model training ceases to provide any improvements, it will stop without using full budget and the - stop\_reason will be ``MODEL_CONVERGED``. Note, node\_hour = - actual\_hour \* number\_of\_nodes\_invovled. For model type + stop_reason will be ``MODEL_CONVERGED``. Note, node_hour = + actual_hour \* number_of_nodes_invovled. For model type ``cloud-high-accuracy-1``\ (default) and ``cloud-low- latency-1``, the train budget must be between 20,000 and 900,000 milli node hours, inclusive. The default value is 216, @@ -641,58 +670,54 @@ represents one day in wall time. 
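
The budget fields described above are plain int64 values expressed in milli node hours. A small sketch, not part of this patch, assuming the field names from the ImageObjectDetectionModelMetadata descriptor earlier in this file; the specific budget value is only an example:

from google.cloud.automl_v1beta1.proto import image_pb2

# 24 node hours expressed in milli node hours; the docstring above requires
# a value between 20,000 and 900,000 for the cloud model types.
detection_metadata = image_pb2.ImageObjectDetectionModelMetadata(
    model_type="cloud-low-latency-1",
    train_budget_milli_node_hours=24000,
)
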
train_cost_milli_node_hours: Output only. The actual train cost of creating this model, - expressed in milli node hours, i.e. 1,000 value in this field + expressed in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionModelMetadata) ImageClassificationModelDeploymentMetadata = _reflection.GeneratedProtocolMessageType( "ImageClassificationModelDeploymentMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model deployment metadata specific to Image - Classification. - + { + "DESCRIPTOR": _IMAGECLASSIFICATIONMODELDEPLOYMENTMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model deployment metadata specific to Image Classification. Attributes: node_count: Input only. The number of nodes to deploy the model on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the model's [node\_qps][goo - gle.cloud.automl.v1beta1.ImageClassificationModelMetadata.node - \_qps]. Must be between 1 and 100, inclusive on both ends. + online prediction QPS as given in the model’s [node_qps][goog + le.cloud.automl.v1beta1.ImageClassificationModelMetadata.node\_ + qps]. Must be between 1 and 100, inclusive on both ends. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageClassificationModelDeploymentMetadata) ImageObjectDetectionModelDeploymentMetadata = _reflection.GeneratedProtocolMessageType( "ImageObjectDetectionModelDeploymentMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, - __module__="google.cloud.automl_v1beta1.proto.image_pb2", - __doc__="""Model deployment metadata specific to Image Object - Detection. - + { + "DESCRIPTOR": _IMAGEOBJECTDETECTIONMODELDEPLOYMENTMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.image_pb2", + "__doc__": """Model deployment metadata specific to Image Object Detection. Attributes: node_count: Input only. The number of nodes to deploy the model on. A node is an abstraction of a machine resource, which can handle - online prediction QPS as given in the model's [qps\_per\_node - ][google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadat - a.qps\_per\_node]. Must be between 1 and 100, inclusive on - both ends. + online prediction QPS as given in the model’s [qps_per_node][ + google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadata. + qps_per_node]. Must be between 1 and 100, inclusive on both + ends. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata) - ), + }, ) _sym_db.RegisterMessage(ImageObjectDetectionModelDeploymentMetadata) diff --git a/google/cloud/automl_v1beta1/proto/io.proto b/google/cloud/automl_v1beta1/proto/io.proto index 5cc61c5e..a9979383 100644 --- a/google/cloud/automl_v1beta1/proto/io.proto +++ b/google/cloud/automl_v1beta1/proto/io.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/io_pb2.py b/google/cloud/automl_v1beta1/proto/io_pb2.py index c2fb6138..13bd8be1 100644 --- a/google/cloud/automl_v1beta1/proto/io_pb2.py +++ b/google/cloud/automl_v1beta1/proto/io_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/io.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,10 @@ name="google/cloud/automl_v1beta1/proto/io.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n*google/cloud/automl_v1beta1/proto/io.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x92\x02\n\x0bInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x12\x44\n\x06params\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.InputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06source"\xa9\x01\n\x17\x42\x61tchPredictInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x42\x08\n\x06source"Q\n\x13\x44ocumentInputConfig\x12:\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSource"\xb7\x01\n\x0cOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xc3\x01\n\x18\x42\x61tchPredictOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xcf\x02\n\x17ModelExportOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12\x46\n\x0fgcr_destination\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcrDestinationH\x00\x12\x14\n\x0cmodel_format\x18\x04 \x01(\t\x12P\n\x06params\x18\x02 \x03(\x0b\x32@.google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stination"\x86\x01\n#ExportEvaluatedExamplesOutputConfig\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\x1f\n\tGcsSource\x12\x12\n\ninput_uris\x18\x01 \x03(\t"#\n\x0e\x42igQuerySource\x12\x11\n\tinput_uri\x18\x01 
\x01(\t"+\n\x0eGcsDestination\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t")\n\x13\x42igQueryDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\t"$\n\x0eGcrDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n*google/cloud/automl_v1beta1/proto/io.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"\x92\x02\n\x0bInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x12\x44\n\x06params\x18\x02 \x03(\x0b\x32\x34.google.cloud.automl.v1beta1.InputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x08\n\x06source"\xa9\x01\n\x17\x42\x61tchPredictInputConfig\x12<\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSourceH\x00\x12\x46\n\x0f\x62igquery_source\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.BigQuerySourceH\x00\x42\x08\n\x06source"Q\n\x13\x44ocumentInputConfig\x12:\n\ngcs_source\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.GcsSource"\xb7\x01\n\x0cOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xc3\x01\n\x18\x42\x61tchPredictOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\xcf\x02\n\x17ModelExportOutputConfig\x12\x46\n\x0fgcs_destination\x18\x01 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcsDestinationH\x00\x12\x46\n\x0fgcr_destination\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.GcrDestinationH\x00\x12\x14\n\x0cmodel_format\x18\x04 \x01(\t\x12P\n\x06params\x18\x02 \x03(\x0b\x32@.google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\r\n\x0b\x64\x65stination"\x86\x01\n#ExportEvaluatedExamplesOutputConfig\x12P\n\x14\x62igquery_destination\x18\x02 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.BigQueryDestinationH\x00\x42\r\n\x0b\x64\x65stination"\x1f\n\tGcsSource\x12\x12\n\ninput_uris\x18\x01 \x03(\t"#\n\x0e\x42igQuerySource\x12\x11\n\tinput_uri\x18\x01 \x01(\t"+\n\x0eGcsDestination\x12\x19\n\x11output_uri_prefix\x18\x01 \x01(\t")\n\x13\x42igQueryDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\t"$\n\x0eGcrDestination\x12\x12\n\noutput_uri\x18\x01 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ 
-38,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -48,7 +43,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -56,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -66,7 +62,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -74,12 +70,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -94,6 +91,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_source", @@ -112,6 +110,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_source", @@ -130,6 +129,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="params", @@ -148,10 +148,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_INPUTCONFIG_PARAMSENTRY], + nested_types=[_INPUTCONFIG_PARAMSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -163,8 +164,9 @@ full_name="google.cloud.automl.v1beta1.InputConfig.source", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=106, serialized_end=380, @@ -177,6 +179,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_source", @@ -195,6 +198,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_source", @@ -213,6 +217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -228,8 +233,9 @@ full_name="google.cloud.automl.v1beta1.BatchPredictInputConfig.source", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=383, serialized_end=552, @@ -242,6 +248,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_source", @@ -260,7 +267,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -281,6 +289,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_destination", @@ -299,6 +308,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="bigquery_destination", @@ -317,6 +327,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -332,8 +343,9 @@ full_name="google.cloud.automl.v1beta1.OutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=638, serialized_end=821, @@ -346,6 +358,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_destination", @@ -364,6 +377,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_destination", @@ -382,6 +396,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -397,8 +412,9 @@ full_name="google.cloud.automl.v1beta1.BatchPredictOutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=824, serialized_end=1019, @@ -411,6 +427,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -421,7 +438,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -429,6 +446,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -439,7 +457,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -447,12 +465,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -467,6 +486,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_destination", @@ -485,6 +505,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="gcr_destination", @@ -503,6 +524,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model_format", @@ -513,7 +535,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -521,6 +543,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="params", @@ -539,10 +562,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY], + nested_types=[_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -554,8 +578,9 @@ 
full_name="google.cloud.automl.v1beta1.ModelExportOutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=1022, serialized_end=1357, @@ -568,6 +593,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bigquery_destination", @@ -586,7 +612,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -601,8 +628,9 @@ full_name="google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig.destination", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=1360, serialized_end=1494, @@ -615,6 +643,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uris", @@ -633,7 +662,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -654,6 +684,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_uri", @@ -664,7 +695,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -672,7 +703,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -693,6 +725,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_uri_prefix", @@ -703,7 +736,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -711,7 +744,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -732,6 +766,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_uri", @@ -742,7 +777,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -750,7 +785,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -771,6 +807,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_uri", @@ -781,7 +818,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -789,7 +826,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -918,246 +956,260 @@ InputConfig = _reflection.GeneratedProtocolMessageType( "InputConfig", (_message.Message,), - dict( - 
ParamsEntry=_reflection.GeneratedProtocolMessageType( + { + "ParamsEntry": _reflection.GeneratedProtocolMessageType( "ParamsEntry", (_message.Message,), - dict( - DESCRIPTOR=_INPUTCONFIG_PARAMSENTRY, - __module__="google.cloud.automl_v1beta1.proto.io_pb2" + { + "DESCRIPTOR": _INPUTCONFIG_PARAMSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig.ParamsEntry) - ), + }, ), - DESCRIPTOR=_INPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Input configuration for ImportData Action. - - The format of input depends on dataset\_metadata the Dataset into which - the import is happening has. As input source the - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] is + "DESCRIPTOR": _INPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Input configuration for ImportData Action. The format of input + depends on dataset_metadata the Dataset into which the import is + happening has. As input source the + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is expected, unless specified otherwise. Additionally any input .CSV file by itself must be 100MB or smaller, unless specified otherwise. If an - "example" file (that is, image, video etc.) with identical content (even - if it had different GCS\_FILE\_PATH) is mentioned multiple times, then - its label, bounding boxes etc. are appended. The same file should be - always provided with the same ML\_USE and GCS\_FILE\_PATH, if it is not, - then these values are nondeterministically selected from the given ones. - - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: - - - For Image Classification: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH,LABEL,LABEL,... GCS\_FILE\_PATH leads to - image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, - .WEBP, .BMP, .TIFF, .ICO For MULTICLASS classification type, at most - one LABEL is allowed per image. If an image has not yet been labeled, - then it should be mentioned just once with no LABEL. Some sample - rows: TRAIN,gs://folder/image1.jpg,daisy - TEST,gs://folder/image2.jpg,dandelion,tulip,rose - UNASSIGNED,gs://folder/image3.jpg,daisy - UNASSIGNED,gs://folder/image4.jpg - - - For Image Object Detection: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH,(LABEL,BOUNDING\_BOX \| ,,,,,,,) - GCS\_FILE\_PATH leads to image of up to 30MB in size. Supported - extensions: .JPEG, .GIF, .PNG. Each image is assumed to be - exhaustively labeled. The minimum allowed BOUNDING\_BOX edge length - is 0.01, and no more than 500 BOUNDING\_BOX-es per image are allowed - (one BOUNDING\_BOX is defined per line). If an image has not yet been - labeled, then it should be mentioned just once with no LABEL and the - ",,,,,,," in place of the BOUNDING\_BOX. For images which are known - to not contain any bounding boxes, they should be labelled explictly - as "NEGATIVE\_IMAGE", followed by ",,,,,,," in place of the - BOUNDING\_BOX. 
Sample rows: - TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,, - TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,, - UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3 - TEST,gs://folder/im3.png,,,,,,,,, - TRAIN,gs://folder/im4.png,NEGATIVE\_IMAGE,,,,,,,,, - - - For Video Classification: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH where ML\_USE VALIDATE value should not be - used. The GCS\_FILE\_PATH should lead to another .csv file which - describes examples that have given ML\_USE, using the following row - format: - GCS\_FILE\_PATH,(LABEL,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END \| ,,) - Here GCS\_FILE\_PATH leads to a video of up to 50GB in size and up to - 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME\_SEGMENT\_START and TIME\_SEGMENT\_END must be within the length - of the video, and end has to be after the start. Any segment of a - video which has one or more labels on it, is considered a hard - negative for all other labels. Any segment with no labels on it is - considered to be unknown. If a whole video is unknown, then it shuold - be mentioned just once with ",," in place of LABEL, - TIME\_SEGMENT\_START,TIME\_SEGMENT\_END. Sample top level CSV file: - TRAIN,gs://folder/train\_videos.csv TEST,gs://folder/test\_videos.csv - UNASSIGNED,gs://folder/other\_videos.csv Sample rows of a CSV file - for a particular ML\_USE: gs://folder/video1.avi,car,120,180.000021 - gs://folder/video1.avi,bike,150,180.000021 - gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - - - For Video Object Tracking: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH where ML\_USE VALIDATE value should not be - used. The GCS\_FILE\_PATH should lead to another .csv file which - describes examples that have given ML\_USE, using one of the - following row format: - GCS\_FILE\_PATH,LABEL,[INSTANCE\_ID],TIMESTAMP,BOUNDING\_BOX or - GCS\_FILE\_PATH,,,,,,,,,, Here GCS\_FILE\_PATH leads to a video of up - to 50GB in size and up to 3h duration. Supported extensions: .MOV, - .MPEG4, .MP4, .AVI. Providing INSTANCE\_IDs can help to obtain a - better model. When a specific labeled entity leaves the video frame, - and shows up afterwards it is not required, albeit preferable, that - the same INSTANCE\_ID is given to it. TIMESTAMP must be within the - length of the video, the BOUNDING\_BOX is assumed to be drawn on the - closest video's frame to the TIMESTAMP. Any mentioned by the - TIMESTAMP frame is expected to be exhaustively labeled and no more - than 500 BOUNDING\_BOX-es per frame are allowed. If a whole video is - unknown, then it should be mentioned just once with ",,,,,,,,,," in - place of LABEL, [INSTANCE\_ID],TIMESTAMP,BOUNDING\_BOX. Sample top - level CSV file: TRAIN,gs://folder/train\_videos.csv - TEST,gs://folder/test\_videos.csv - UNASSIGNED,gs://folder/other\_videos.csv Seven sample rows of a CSV - file for a particular ML\_USE: - gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9 - gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9 - gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3 - gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,, - gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,, - gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,, - gs://folder/video2.avi,,,,,,,,,,, - - For Text Extraction: CSV file(s) with each line in format: - ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a .JSONL (that is, - JSON Lines) file which either imports text in-line or as documents. 
- Any given .JSONL file must be 100MB or smaller. The in-line .JSONL - file contains, per line, a proto that wraps a TextSnippet proto (in - json representation) followed by one or more AnnotationPayload protos - (called annotations), which have display\_name and text\_extraction - detail populated. The given text is expected to be annotated - exhaustively, for example, if you look for animals and text contains - "dolphin" that is not labeled, then "dolphin" is assumed to not be an - animal. Any given text snippet content must be 10KB or smaller, and - also be UTF-8 NFC encoded (ASCII already is). The document .JSONL - file contains, per line, a proto that wraps a Document proto. The - Document proto must have either document\_text or input\_config set. - In document\_text case, the Document proto may also contain the - spatial information of the document, including layout, document - dimension and page number. In input\_config case, only PDF documents - are supported now, and each document may be up to 2MB large. - Currently, annotations on documents cannot be specified at import. - Three sample CSV rows: TRAIN,gs://folder/file1.jsonl - VALIDATE,gs://folder/file2.jsonl TEST,gs://folder/file3.jsonl - - - For Text Classification: CSV file(s) with each line in format: - ML\_USE,(TEXT\_SNIPPET \| GCS\_FILE\_PATH),LABEL,LABEL,... - TEXT\_SNIPPET and GCS\_FILE\_PATH are distinguished by a pattern. If - the column content is a valid gcs file path, i.e. prefixed by - "gs://", it will be treated as a GCS\_FILE\_PATH, else if the content - is enclosed within double quotes (""), it is treated as a - TEXT\_SNIPPET. In the GCS\_FILE\_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for example, - "gs://folder/content.txt", and the content in it is extracted as a - text snippet. In TEXT\_SNIPPET case, the column content excluding - quotes is treated as to be imported text snippet. In both cases, the - text snippet/file size must be within 128kB. Maximum 100 unique - labels are allowed per CSV row. Sample rows: TRAIN,"They have bad - food and very rude",RudeService,BadFood - TRAIN,gs://folder/content.txt,SlowService TEST,"Typically always bad - service there.",RudeService VALIDATE,"Stomach ache to go.",BadFood - - - For Text Sentiment: CSV file(s) with each line in format: - ML\_USE,(TEXT\_SNIPPET \| GCS\_FILE\_PATH),SENTIMENT TEXT\_SNIPPET - and GCS\_FILE\_PATH are distinguished by a pattern. If the column - content is a valid gcs file path, that is, prefixed by "gs://", it is - treated as a GCS\_FILE\_PATH, otherwise it is treated as a - TEXT\_SNIPPET. In the GCS\_FILE\_PATH case, the path must lead to a - .txt file with UTF-8 encoding, for example, - "gs://folder/content.txt", and the content in it is extracted as a - text snippet. In TEXT\_SNIPPET case, the column content itself is - treated as to be imported text snippet. In both cases, the text - snippet must be up to 500 characters long. Sample rows: - TRAIN,"@freewrytin this is way too good for your product",2 TRAIN,"I - need this product so bad",3 TEST,"Thank you for this product.",4 - VALIDATE,gs://folder/content.txt,2 - - - For Tables: Either - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] or - - [bigquery\_source][google.cloud.automl.v1beta1.InputConfig.bigquery\_source] - can be used. 
All inputs is concatenated into a single - - [primary\_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary\_table\_name] - For gcs\_source: CSV file(s), where the first row of the first file is - the header, containing unique column names. If the first row of a - subsequent file is the same as the header, then it is also treated as a - header. All other rows contain values for the corresponding columns. - Each .CSV file by itself must be 10GB or smaller, and their total size - must be 100GB or smaller. First three sample rows of a CSV file: - "Id","First Name","Last Name","Dob","Addresses" - - "1","John","Doe","1968-01-22","[{"status":"current","address":"123\_First\_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456\_Main\_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" - - "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789\_Any\_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321\_Main\_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} - For bigquery\_source: An URI of a BigQuery table. The user data size of - the BigQuery table must be 100GB or smaller. An imported table must have - between 2 and 1,000 columns, inclusive, and between 1000 and 100,000,000 - rows, inclusive. There are at most 5 import data running in parallel. - Definitions: ML\_USE = "TRAIN" \| "VALIDATE" \| "TEST" \| "UNASSIGNED" - Describes how the given example (file) should be used for model - training. "UNASSIGNED" can be used when user has no preference. - GCS\_FILE\_PATH = A path to file on GCS, e.g. "gs://folder/image1.png". - LABEL = A display name of an object on an image, video etc., e.g. "dog". - Must be up to 32 characters long and can consist only of ASCII Latin - letters A-Z and a-z, underscores(\_), and ASCII digits 0-9. For each - label an AnnotationSpec is created which display\_name becomes the - label; AnnotationSpecs are given back in predictions. INSTANCE\_ID = A - positive integer that identifies a specific instance of a labeled entity - on an example. Used e.g. to track two cars on a video while being able - to tell apart which one is which. BOUNDING\_BOX = - VERTEX,VERTEX,VERTEX,VERTEX \| VERTEX,,,VERTEX,, A rectangle parallel to - the frame of the example (image, video). If 4 vertices are given they - are connected by edges in the order provided, if 2 are given they are - recognized as diagonally opposite vertices of the rectangle. VERTEX = - COORDINATE,COORDINATE First coordinate is horizontal (x), the second is - vertical (y). COORDINATE = A float in 0 to 1 range, relative to total - length of image or video in given dimension. For fractions the leading - non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top left. - TIME\_SEGMENT\_START = TIME\_OFFSET Expresses a beginning, inclusive, of - a time segment within an example that has a time dimension (e.g. video). - TIME\_SEGMENT\_END = TIME\_OFFSET Expresses an end, exclusive, of a time - segment within an example that has a time dimension (e.g. video). - TIME\_OFFSET = A number of seconds as measured from the start of an - example (e.g. video). Fractions are allowed, up to a microsecond - precision. "inf" is allowed, and it means the end of the example. - TEXT\_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed - within double quotes (""). 
SENTIMENT = An integer between 0 and
- Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max (inclusive).
- Describes the ordinal of the sentiment - higher value means a more
- positive sentiment. All the values are completely relative, i.e. neither
- 0 needs to mean a negative or neutral sentiment nor sentiment\_max needs
- to mean a positive one - it is just required that 0 is the least
- positive sentiment in the data, and sentiment\_max is the most positive
- one. The SENTIMENT shouldn't be confused with "score" or "magnitude"
- from the previous Natural Language Sentiment Analysis API. All SENTIMENT
- values between 0 and sentiment\_max must be represented in the imported
- data. On prediction the same 0 to sentiment\_max range will be used. The
- difference between neighboring sentiment values needs not to be uniform,
- e.g. 1 and 2 may be similar whereas the difference between 2 and 3 may
- be huge.
-
- Errors: If any of the provided CSV files can't be parsed or if more than
- certain percent of CSV rows cannot be processed then the operation fails
- and nothing is imported. Regardless of overall success or failure the
- per-row failures, up to a certain count cap, is listed in
- Operation.metadata.partial\_failures.
-
+ “example” file (that is, image, video etc.) with identical content
+ (even if it had different GCS_FILE_PATH) is mentioned multiple times,
+ then its label, bounding boxes etc. are appended. The same file should
+ always be provided with the same ML_USE and GCS_FILE_PATH; if it is
+ not, then these values are nondeterministically selected from the
+ given ones. The formats are represented in EBNF with commas being
+ literal and with non-terminal symbols defined near the end of this
+ comment. The formats are: - For Image Classification: CSV file(s)
+ with each line in format: ML_USE,GCS_FILE_PATH,LABEL,LABEL,…
+ GCS_FILE_PATH leads to image of up to 30MB in size. Supported
+ extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF, .ICO. For
+ MULTICLASS classification type, at most one LABEL is allowed per
+ image. If an image has not yet been labeled, then it should be
+ mentioned just once with no LABEL. Some sample rows:
+ TRAIN,gs://folder/image1.jpg,daisy
+ TEST,gs://folder/image2.jpg,dandelion,tulip,rose
+ UNASSIGNED,gs://folder/image3.jpg,daisy
+ UNASSIGNED,gs://folder/image4.jpg - For Image Object Detection: CSV
+ file(s) with each line in format:
+ ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX \| ,,,,,,,) GCS_FILE_PATH
+ leads to image of up to 30MB in size. Supported extensions: .JPEG,
+ .GIF, .PNG. Each image is assumed to be exhaustively labeled. The
+ minimum allowed BOUNDING_BOX edge length is 0.01, and no more than
+ 500 BOUNDING_BOX-es per image are allowed (one BOUNDING_BOX is
+ defined per line). If an image has not yet been labeled, then it
+ should be mentioned just once with no LABEL and the “,,,,,,,” in
+ place of the BOUNDING_BOX. Images which are known to not contain
+ any bounding boxes should be labeled explicitly as
+ “NEGATIVE_IMAGE”, followed by “,,,,,,,” in place of the BOUNDING_BOX.
+ Sample rows: TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
+ TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
+ UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
+ TEST,gs://folder/im3.png,,,,,,,,,
+ TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - For Video
+ Classification: CSV file(s) with each line in format:
+ ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used.
+ The GCS_FILE_PATH should lead to another .csv file which describes
+ examples that have given ML_USE, using the following row format:
+ GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END \| ,,) Here
+ GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h
+ duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
+ TIME_SEGMENT_START and TIME_SEGMENT_END must be within the length of
+ the video, and end has to be after the start. Any segment of a video
+ which has one or more labels on it is considered a hard negative for
+ all other labels. Any segment with no labels on it is considered to
+ be unknown. If a whole video is unknown, then it should be mentioned
+ just once with “,,” in place of LABEL,
+ TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file:
+ TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv
+ UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for
+ a particular ML_USE: gs://folder/video1.avi,car,120,180.000021
+ gs://folder/video1.avi,bike,150,180.000021
+ gs://folder/vid2.avi,car,0,60.5 gs://folder/vid3.avi,,, - For Video
+ Object Tracking: CSV file(s) with each line in format:
+ ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not be used.
+ The GCS_FILE_PATH should lead to another .csv file which describes
+ examples that have given ML_USE, using one of the following row
+ formats: GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or
+ GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up to
+ 50GB in size and up to 3h duration. Supported extensions: .MOV,
+ .MPEG4, .MP4, .AVI. Providing INSTANCE_IDs can help to obtain a
+ better model. When a specific labeled entity leaves the video frame
+ and shows up afterwards, it is not required, albeit preferable, that
+ the same INSTANCE_ID is given to it. TIMESTAMP must be within the
+ length of the video; the BOUNDING_BOX is assumed to be drawn on the
+ video frame closest to the TIMESTAMP. Any frame mentioned by a
+ TIMESTAMP is expected to be exhaustively labeled and no more
+ than 500 BOUNDING_BOX-es per frame are allowed. If a whole video is
+ unknown, then it should be mentioned just once with “,,,,,,,,,,” in
+ place of LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. Sample top
+ level CSV file: TRAIN,gs://folder/train_videos.csv
+ TEST,gs://folder/test_videos.csv
+ UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV
+ file for a particular ML_USE:
+ gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
+ gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
+ gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
+ gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
+ gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
+ gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
+ gs://folder/video2.avi,,,,,,,,,,, - For Text Extraction: CSV file(s)
+ with each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads
+ to a .JSONL (that is, JSON Lines) file which either imports text
+ in-line or as documents. Any given .JSONL file must be 100MB or
+ smaller. The in-line .JSONL file contains, per line, a proto that
+ wraps a TextSnippet proto (in json representation) followed by one
+ or more AnnotationPayload protos (called annotations), which have
+ display_name and text_extraction detail populated. The given text
+ is expected to be annotated exhaustively; for example, if you look
+ for animals and text contains “dolphin” that is not labeled, then
+ “dolphin” is assumed to not be an animal.
Any given text snippet + content must be 10KB or smaller, and also be UTF-8 NFC encoded + (ASCII already is). The document .JSONL file contains, per line, a + proto that wraps a Document proto. The Document proto must have + either document_text or input_config set. In document_text case, + the Document proto may also contain the spatial information of the + document, including layout, document dimension and page number. In + input_config case, only PDF documents are supported now, and each + document may be up to 2MB large. Currently, annotations on + documents cannot be specified at import. Three sample CSV rows: + TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl + TEST,gs://folder/file3.jsonl Sample in-line JSON Lines file for + entity extraction (presented here with artificial line breaks, but + the only actual line break is denoted by :raw-latex:`\n`).: { + “document”: { “document_text”: {“content”: “dog cat”} “layout”: [ { + “text_segment”: { “start_offset”: 0, “end_offset”: 3, }, + “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: + 0.1, “y”: 0.1}, {“x”: 0.1, “y”: 0.3}, {“x”: 0.3, “y”: 0.3}, {“x”: + 0.3, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }, { + “text_segment”: { “start_offset”: 4, “end_offset”: 7, }, + “page_number”: 1, “bounding_poly”: { “normalized_vertices”: [ {“x”: + 0.4, “y”: 0.1}, {“x”: 0.4, “y”: 0.3}, {“x”: 0.8, “y”: 0.3}, {“x”: + 0.8, “y”: 0.1}, ], }, “text_segment_type”: TOKEN, }], + “document_dimensions”: { “width”: 8.27, “height”: 11.69, “unit”: + INCH, } “page_count”: 1, }, “annotations”: [ { “display_name”: + “animal”, “text_extraction”: {“text_segment”: {“start_offset”: 0, + “end_offset”: 3}} }, { “display_name”: “animal”, “text_extraction”: + {“text_segment”: {“start_offset”: 4, “end_offset”: 7}} } ], }:raw- + latex:`\n { "text_snippet": { + "content": "This dog is good." }, + "annotations": [ { "display_name": + "animal", "text_extraction": { + "text_segment": {"start_offset": 5, "end_offset": 8} } + } ] }` Sample document JSON Lines file (presented + here with artificial line breaks, but the only actual line break is + denoted by :raw-latex:`\n`).: { “document”: { “input_config”: { + “gcs_source”: { “input_uris”: [ “gs://folder/document1.pdf” ] } } } + }:raw-latex:`\n { "document": { + "input_config": { "gcs_source": { "input_uris": [ + "gs://folder/document2.pdf" ] } } + } }` - For Text Classification: CSV file(s) with each line + in format: ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),LABEL,LABEL,… + TEXT_SNIPPET and GCS_FILE_PATH are distinguished by a pattern. If + the column content is a valid gcs file path, i.e. prefixed by + “gs://”, it will be treated as a GCS_FILE_PATH, else if the content + is enclosed within double quotes ("“), it is treated as a + TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a + .txt file with UTF-8 encoding, for + example,”gs://folder/content.txt“, and the content in it is + extracted as a text snippet. In TEXT_SNIPPET case, the column + content excluding quotes is treated as to be imported text snippet. + In both cases, the text snippet/file size must be within 128kB. + Maximum 100 unique labels are allowed per CSV row. 
Sample rows: + TRAIN,”They have bad food and very rude“,RudeService,BadFood + TRAIN,gs://folder/content.txt,SlowService TEST,”Typically always bad + service there.“,RudeService VALIDATE,”Stomach ache to go.",BadFood - + For Text Sentiment: CSV file(s) with each line in format: + ML_USE,(TEXT_SNIPPET \| GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and + GCS_FILE_PATH are distinguished by a pattern. If the column content + is a valid gcs file path, that is, prefixed by “gs://”, it is treated + as a GCS_FILE_PATH, otherwise it is treated as a TEXT_SNIPPET. In the + GCS_FILE_PATH case, the path must lead to a .txt file with UTF-8 + encoding, for example, “gs://folder/content.txt”, and the content in + it is extracted as a text snippet. In TEXT_SNIPPET case, the column + content itself is treated as to be imported text snippet. In both + cases, the text snippet must be up to 500 characters long. Sample + rows: TRAIN,“@freewrytin this is way too good for your product”,2 + TRAIN,“I need this product so bad”,3 TEST,“Thank you for this + product.”,4 VALIDATE,gs://folder/content.txt,2 - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or [ + bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour + ce] can be used. All inputs is concatenated into a single [primary_ta + ble][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_n + ame] For gcs_source: CSV file(s), where the first row of the first + file is the header, containing unique column names. If the first row + of a subsequent file is the same as the header, then it is also + treated as a header. All other rows contain values for the + corresponding columns. Each .CSV file by itself must be 10GB or + smaller, and their total size must be 100GB or smaller. First three + sample rows of a CSV file: “Id”,“First Name”,“Last + Name”,“Dob”,“Addresses” “1”,“John”,“Doe”,“1968-01-22”,“[{"status":"cu + rrent","address":"123_First_Avenue","city":"Seattle","state":"WA","zip + ":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Mai + n_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears" + :"5"}]” “2”,“Jane”,“Doe”,“1980-10-16”,“[{"status":"current","address" + :"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOf + Years":"2"},{"status":"previous","address":"321_Main_Street","city":"H + oboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} For + bigquery_source: An URI of a BigQuery table. The user data size of the + BigQuery table must be 100GB or smaller. An imported table must have + between 2 and 1,000 columns, inclusive, and between 1000 and + 100,000,000 rows, inclusive. There are at most 5 import data running + in parallel. Definitions: ML_USE =”TRAIN" \| “VALIDATE” \| “TEST” \| + “UNASSIGNED” Describes how the given example (file) should be used for + model training. “UNASSIGNED” can be used when user has no preference. + GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/image1.png”. + LABEL = A display name of an object on an image, video etc., e.g. + “dog”. Must be up to 32 characters long and can consist only of ASCII + Latin letters A-Z and a-z, underscores(_), and ASCII digits 0-9. For + each label an AnnotationSpec is created which display_name becomes the + label; AnnotationSpecs are given back in predictions. INSTANCE_ID = A + positive integer that identifies a specific instance of a labeled + entity on an example. Used e.g. to track two cars on a video while + being able to tell apart which one is which. 
BOUNDING_BOX =
+ VERTEX,VERTEX,VERTEX,VERTEX \| VERTEX,,,VERTEX,, A rectangle parallel
+ to the frame of the example (image, video). If 4 vertices are given
+ they are connected by edges in the order provided; if 2 are given they
+ are recognized as diagonally opposite vertices of the rectangle.
+ VERTEX = COORDINATE,COORDINATE First coordinate is horizontal (x), the
+ second is vertical (y). COORDINATE = A float in 0 to 1 range, relative
+ to total length of image or video in given dimension. For fractions
+ the leading non-decimal 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is
+ in top left. TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning,
+ inclusive, of a time segment within an example that has a time
+ dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses an
+ end, exclusive, of a time segment within an example that has a time
+ dimension (e.g. video). TIME_OFFSET = A number of seconds as measured
+ from the start of an example (e.g. video). Fractions are allowed, up
+ to a microsecond precision. “inf” is allowed, and it means the end of
+ the example. TEXT_SNIPPET = A content of a text snippet, UTF-8
+ encoded, enclosed within double quotes (""). SENTIMENT = An integer
+ between 0 and Dataset.text_sentiment_dataset_metadata.sentiment_max
+ (inclusive). Describes the ordinal of the sentiment - higher value
+ means a more positive sentiment. All the values are completely
+ relative, i.e. neither 0 needs to mean a negative or neutral sentiment
+ nor sentiment_max needs to mean a positive one - it is just required
+ that 0 is the least positive sentiment in the data, and sentiment_max
+ is the most positive one. The SENTIMENT shouldn’t be confused
+ with “score” or “magnitude” from the previous Natural Language
+ Sentiment Analysis API. All SENTIMENT values between 0 and
+ sentiment_max must be represented in the imported data. On prediction
+ the same 0 to sentiment_max range will be used. The difference between
+ neighboring sentiment values need not be uniform, e.g. 1 and 2 may
+ be similar whereas the difference between 2 and 3 may be huge.
+ Errors: If any of the provided CSV files can’t be parsed or if more
+ than a certain percent of CSV rows cannot be processed then the
+ operation fails and nothing is imported. Regardless of overall success
+ or failure the per-row failures, up to a certain count cap, are listed
+ in Operation.metadata.partial_failures.

  Attributes:
      source:
          The source of the input.
      gcs_source:
          The Google Cloud Storage location for the input content. In
-         ImportData, the gcs\_source points to a csv with structure
+         ImportData, the gcs_source points to a csv with structure
          described in the comment.
      bigquery_source:
          The BigQuery location for the input content.
@@ -1167,11 +1219,11 @@
          characters long. - For Tables: ``schema_inference_version``
          - (integer) Required. The version of the algorithm that
          should be used for the initial inference of the schema
-         (columns' DataTypes) of the table the data is being
-         imported into. Allowed values: "1".
+         (columns’ DataTypes) of the table the data is being
+         imported into. Allowed values: “1”.
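A minimal usage sketch of the ImportData flow documented above (illustrative only, not part of the generated module; the project, location, dataset ID and bucket path are placeholders)::

    # Illustrative sketch: import a CSV laid out as described above (for
    # example the image-classification rows) into an existing dataset.
    from google.cloud import automl_v1beta1

    client = automl_v1beta1.AutoMlClient()

    # Placeholder identifiers; substitute real project, location and dataset values.
    dataset_name = client.dataset_path("my-project", "us-central1", "my-dataset-id")

    input_config = {
        "gcs_source": {"input_uris": ["gs://my-bucket/image_classification.csv"]}
    }

    # import_data returns a long-running operation; result() blocks until the
    # rows are ingested. Per-row problems, if any, are reported in the
    # operation metadata (partial_failures), as described above.
    operation = client.import_data(dataset_name, input_config)
    operation.result()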
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.InputConfig) - ), + }, ) _sym_db.RegisterMessage(InputConfig) _sym_db.RegisterMessage(InputConfig.ParamsEntry) @@ -1179,140 +1231,118 @@ BatchPredictInputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTINPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Input configuration for BatchPredict Action. - - The format of input depends on the ML problem of the model used for - prediction. As input source the - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] is - expected, unless specified otherwise. - - The formats are represented in EBNF with commas being literal and with - non-terminal symbols defined near the end of this comment. The formats - are: - - - For Image Classification: CSV file(s) with each line having just a - single column: GCS\_FILE\_PATH which leads to image of up to 30MB in - size. Supported extensions: .JPEG, .GIF, .PNG. This path is treated - as the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - - - For Image Object Detection: CSV file(s) with each line having just a - single column: GCS\_FILE\_PATH which leads to image of up to 30MB in - size. Supported extensions: .JPEG, .GIF, .PNG. This path is treated - as the ID in the Batch predict output. Three sample rows: - gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png + { + "DESCRIPTOR": _BATCHPREDICTINPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Input configuration for BatchPredict Action. The format of input + depends on the ML problem of the model used for prediction. As input + source the + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is + expected, unless specified otherwise. The formats are represented in + EBNF with commas being literal and with non-terminal symbols defined + near the end of this comment. The formats are: - For Image + Classification: CSV file(s) with each line having just a single + column: GCS_FILE_PATH which leads to image of up to 30MB in size. + Supported extensions: .JPEG, .GIF, .PNG. This path is treated as + the ID in the Batch predict output. Three sample rows: + gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png + - For Image Object Detection: CSV file(s) with each line having just + a single column: GCS_FILE_PATH which leads to image of up to 30MB + in size. Supported extensions: .JPEG, .GIF, .PNG. This path is + treated as the ID in the Batch predict output. Three sample rows: + gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - For Video Classification: CSV file(s) with each line in format: - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END - GCS\_FILE\_PATH leads to video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. - TIME\_SEGMENT\_START and TIME\_SEGMENT\_END must be within the length - of the video, and end has to be after the start. Three sample rows: - gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 - gs://folder/vid2.mov,0,inf - - - For Video Object Tracking: CSV file(s) with each line in format: - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END - GCS\_FILE\_PATH leads to video of up to 50GB in size and up to 3h - duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI. 
- TIME\_SEGMENT\_START and TIME\_SEGMENT\_END must be within the length - of the video, and end has to be after the start. Three sample rows: - gs://folder/video1.mp4,10,240 gs://folder/video1.mp4,300,360 - gs://folder/vid2.mov,0,inf - - For Text Classification: CSV file(s) with each line having just a - single column: GCS\_FILE\_PATH \| TEXT\_SNIPPET Any given text file - can have size upto 128kB. Any given text snippet content must have - 60,000 characters or less. Three sample rows: gs://folder/text1.txt - "Some text content to predict" gs://folder/text3.pdf Supported file - extensions: .txt, .pdf - - - For Text Sentiment: CSV file(s) with each line having just a single - column: GCS\_FILE\_PATH \| TEXT\_SNIPPET Any given text file can have - size upto 128kB. Any given text snippet content must have 500 - characters or less. Three sample rows: gs://folder/text1.txt "Some - text content to predict" gs://folder/text3.pdf Supported file - extensions: .txt, .pdf - - - For Text Extraction .JSONL (i.e. JSON Lines) file(s) which either - provide text in-line or as documents (for a single BatchPredict call - only one of the these formats may be used). The in-line .JSONL - file(s) contain per line a proto that wraps a temporary user-assigned - TextSnippet ID (string up to 2000 characters long) called "id", a - TextSnippet proto (in json representation) and zero or more - TextFeature protos. Any given text snippet content must have 30,000 - characters or less, and also be UTF-8 NFC encoded (ASCII already is). - The IDs provided should be unique. The document .JSONL file(s) - contain, per line, a proto that wraps a Document proto with - input\_config set. Only PDF documents are supported now, and each - document must be up to 2MB large. Any given .JSONL file must be 100MB - or smaller, and no more than 20 files may be given. - - - For Tables: Either - [gcs\_source][google.cloud.automl.v1beta1.InputConfig.gcs\_source] or - - [bigquery\_source][google.cloud.automl.v1beta1.InputConfig.bigquery\_source]. - GCS case: CSV file(s), each by itself 10GB or smaller and total size - must be 100GB or smaller, where first file must have a header containing - column names. If the first row of a subsequent file is the same as the - header, then it is also treated as a header. All other rows contain - values for the corresponding columns. The column names must contain the - model's - - [input\_feature\_column\_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input\_feature\_column\_specs] - - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] - (order doesn't matter). The columns corresponding to the model's input + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads + to video of up to 50GB in size and up to 3h duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and + TIME_SEGMENT_END must be within the length of the video, and end has + to be after the start. Three sample rows: + gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60 + gs://folder/vid2.mov,0,inf - For Video Object Tracking: CSV file(s) + with each line in format: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads + to video of up to 50GB in size and up to 3h duration. Supported + extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and + TIME_SEGMENT_END must be within the length of the video, and end has + to be after the start. 
Three sample rows: + gs://folder/video1.mp4,10,240 gs://folder/video1.mp4,300,360 + gs://folder/vid2.mov,0,inf - For Text Classification: CSV file(s) + with each line having just a single column: GCS_FILE_PATH \| + TEXT_SNIPPET Any given text file can have size upto 128kB. Any + given text snippet content must have 60,000 characters or less. + Three sample rows: gs://folder/text1.txt “Some text content to + predict” gs://folder/text3.pdf Supported file extensions: .txt, + .pdf - For Text Sentiment: CSV file(s) with each line having just a + single column: GCS_FILE_PATH \| TEXT_SNIPPET Any given text file + can have size upto 128kB. Any given text snippet content must have + 500 characters or less. Three sample rows: gs://folder/text1.txt + “Some text content to predict” gs://folder/text3.pdf Supported file + extensions: .txt, .pdf - For Text Extraction .JSONL (i.e. JSON + Lines) file(s) which either provide text in-line or as documents + (for a single BatchPredict call only one of the these formats may + be used). The in-line .JSONL file(s) contain per line a proto that + wraps a temporary user-assigned TextSnippet ID (string up to 2000 + characters long) called “id”, a TextSnippet proto (in json + representation) and zero or more TextFeature protos. Any given text + snippet content must have 30,000 characters or less, and also be + UTF-8 NFC encoded (ASCII already is). The IDs provided should be + unique. The document .JSONL file(s) contain, per line, a proto that + wraps a Document proto with input_config set. Only PDF documents + are supported now, and each document must be up to 2MB large. Any + given .JSONL file must be 100MB or smaller, and no more than 20 + files may be given. - For Tables: Either + [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or [ + bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_sour + ce]. GCS case: CSV file(s), each by itself 10GB or smaller and total + size must be 100GB or smaller, where first file must have a header + containing column names. If the first row of a subsequent file is the + same as the header, then it is also treated as a header. All other + rows contain values for the corresponding columns. The column names + must contain the model’s [input_feature_column_specs’][google.cloud.a + utoml.v1beta1.TablesModelMetadata.input_feature_column_specs] + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input feature column specs must contain values compatible with the column - spec's data types. Prediction on all the rows, i.e. the CSV lines, will - be attempted. For FORECASTING - - [prediction\_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: - all columns having - - [TIME\_SERIES\_AVAILABLE\_PAST\_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. 
First three sample rows of a CSV file: "First - Name","Last Name","Dob","Addresses" - - "John","Doe","1968-01-22","[{"status":"current","address":"123\_First\_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456\_Main\_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]" - - "Jane","Doe","1980-10-16","[{"status":"current","address":"789\_Any\_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321\_Main\_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]} - BigQuery case: An URI of a BigQuery table. The user data size of the - BigQuery table must be 100GB or smaller. The column names must contain - the model's - - [input\_feature\_column\_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input\_feature\_column\_specs] - - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] - (order doesn't matter). The columns corresponding to the model's input + spec’s data types. Prediction on all the rows, i.e. the CSV lines, + will be attempted. For FORECASTING [prediction_type][google.cloud.aut + oml.v1beta1.TablesModelMetadata.prediction_type]: all columns having + [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSp + ec.ForecastingMetadata.ColumnType] type will be ignored. First three + sample rows of a CSV file: “First Name”,“Last Name”,“Dob”,“Addresses” + “John”,“Doe”,“1968-01-22”,“[{"status":"current","address":"123_First_A + venue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1" + },{"status":"previous","address":"456_Main_Street","city":"Portland"," + state":"OR","zip":"22222","numberOfYears":"5"}]” “Jane”,“Doe”,“1980-1 + 0-16”,"[{“status”:“current”,“address”:“789_Any_Avenue”,“city”:“Albany” + ,“state”:“NY”,“zip”:“33333”,“numberOfYears”:“2”},{“status”:“previous”, + “address”:“321_Main_Street”,“city”:“Hoboken”,“state”:“NJ”,“zip”:“44444 + ”,“numberOfYears”:“3”}]} BigQuery case: An URI of a BigQuery table. + The user data size of the BigQuery table must be 100GB or smaller. The + column names must contain the model’s [input_feature_column_specs’][g + oogle.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_sp + ecs] + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + (order doesn’t matter). The columns corresponding to the model’s input feature column specs must contain values compatible with the column - spec's data types. Prediction on all the rows of the table will be - attempted. For FORECASTING - - [prediction\_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: - all columns having - - [TIME\_SERIES\_AVAILABLE\_PAST\_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType] - type will be ignored. - - Definitions: GCS\_FILE\_PATH = A path to file on GCS, e.g. - "gs://folder/video.avi". TEXT\_SNIPPET = A content of a text snippet, - UTF-8 encoded, enclosed within double quotes ("") TIME\_SEGMENT\_START = - TIME\_OFFSET Expresses a beginning, inclusive, of a time segment within - an example that has a time dimension (e.g. video). TIME\_SEGMENT\_END = - TIME\_OFFSET Expresses an end, exclusive, of a time segment within an - example that has a time dimension (e.g. video). TIME\_OFFSET = A number - of seconds as measured from the start of an example (e.g. video). - Fractions are allowed, up to a microsecond precision. "inf" is allowed - and it means the end of the example. 
- - Errors: If any of the provided CSV files can't be parsed or if more than - certain percent of CSV rows cannot be processed then the operation fails - and prediction does not happen. Regardless of overall success or failure - the per-row failures, up to a certain count cap, will be listed in - Operation.metadata.partial\_failures. - + spec’s data types. Prediction on all the rows of the table will be + attempted. For FORECASTING [prediction_type][google.cloud.automl.v1be + ta1.TablesModelMetadata.prediction_type]: all columns having [TIME_SE + RIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.Forec + astingMetadata.ColumnType] type will be ignored. Definitions: + GCS_FILE_PATH = A path to file on GCS, e.g. “gs://folder/video.avi”. + TEXT_SNIPPET = A content of a text snippet, UTF-8 encoded, enclosed + within double quotes ("“) TIME_SEGMENT_START = TIME_OFFSET Expresses a + beginning, inclusive, of a time segment within an example that has a + time dimension (e.g. video). TIME_SEGMENT_END = TIME_OFFSET Expresses + an end, exclusive, of a time segment within an example that has a time + dimension (e.g. video). TIME_OFFSET = A number of seconds as measured + from the start of an example (e.g. video). Fractions are allowed, up + to a microsecond precision.”inf" is allowed and it means the end of + the example. Errors: If any of the provided CSV files can’t be parsed + or if more than certain percent of CSV rows cannot be processed then + the operation fails and prediction does not happen. Regardless of + overall success or failure the per-row failures, up to a certain count + cap, will be listed in Operation.metadata.partial_failures. Attributes: source: @@ -1323,20 +1353,19 @@ The BigQuery location for the input content. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictInputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictInputConfig) DocumentInputConfig = _reflection.GeneratedProtocolMessageType( "DocumentInputConfig", (_message.Message,), - dict( - DESCRIPTOR=_DOCUMENTINPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Input configuration of a + { + "DESCRIPTOR": _DOCUMENTINPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Input configuration of a [Document][google.cloud.automl.v1beta1.Document]. - Attributes: gcs_source: The Google Cloud Storage location of the document file. Only a @@ -1344,42 +1373,36 @@ Supported extensions: .PDF. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DocumentInputConfig) - ), + }, ) _sym_db.RegisterMessage(DocumentInputConfig) OutputConfig = _reflection.GeneratedProtocolMessageType( "OutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_OUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""\* For Translation: CSV file ``translation.csv``, with - each line in format: ML\_USE,GCS\_FILE\_PATH GCS\_FILE\_PATH leads to a - .TSV file which describes examples that have given ML\_USE, using the - following row format per line: TEXT\_SNIPPET (in source language) - \\tTEXT\_SNIPPET (in target language) - - - For Tables: Output depends on whether the dataset was imported from - GCS or BigQuery. GCS case: - - [gcs\_destination][google.cloud.automl.v1beta1.OutputConfig.gcs\_destination] - must be set. 
Exported are CSV file(s) ``tables_1.csv``, - ``tables_2.csv``,...,\ ``tables_N.csv`` with each having as header line - the table's column names, and all other lines contain values for the - header columns. BigQuery case: - - [bigquery\_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery\_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - - ``export_data__`` - where will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in - YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In that - dataset a new table called ``primary_table`` will be created, and filled - with precisely the same data as this obtained on import. - + { + "DESCRIPTOR": _OUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """\* For Translation: CSV file ``translation.csv``, with each line in + format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which + describes examples that have given ML_USE, using the following row + format per line: TEXT_SNIPPET (in source language) :raw-latex:`\t + `TEXT_SNIPPET (in target language) - For Tables: Output depends on + whether the dataset was imported from GCS or BigQuery. GCS case: [ + gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destinat + ion] must be set. Exported are CSV file(s) ``tables_1.csv``, + ``tables_2.csv``,…,\ ``tables_N.csv`` with each having as header line + the table’s column names, and all other lines contain values for the + header columns. BigQuery case: [bigquery_destination][google.cloud.au + toml.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery + project must be set. In the given project a new dataset will be + created with name ``export_data__`` where will be made BigQuery- + dataset-name compatible (e.g. most special characters will become + underscores), and timestamp will be in YYYY_MM_DDThh_mm_ss_sssZ “based + on ISO-8601” format. In that dataset a new table called + ``primary_table`` will be created, and filled with precisely the same + data as this obtained on import. Attributes: destination: @@ -1388,294 +1411,235 @@ The Google Cloud Storage location where the output is to be written to. For Image Object Detection, Text Extraction, Video Classification and Tables, in the given directory a new - directory will be created with name: export\_data-- where + directory will be created with name: export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. bigquery_destination: The BigQuery location where the output is to be written to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.OutputConfig) - ), + }, ) _sym_db.RegisterMessage(OutputConfig) BatchPredictOutputConfig = _reflection.GeneratedProtocolMessageType( "BatchPredictOutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTOUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Output configuration for BatchPredict Action. - - As destination the - - [gcs\_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs\_destination] - must be set unless specified otherwise for a domain. If gcs\_destination - is set then in the given directory a new directory is created. Its name - will be "prediction--", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ - ISO-8601 format. 
The contents of it depends on the ML problem the - predictions are made for. - - - For Image Classification: In the created directory files - ``image_classification_1.jsonl``, - ``image_classification_2.jsonl``,...,\ ``image_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. A single image - will be listed only once with all its annotations, and its - annotations will never be split across files. Each .JSONL file will - contain, per line, a JSON representation of a proto that wraps - image's "ID" : "" followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - classification detail populated. If prediction for any image failed - (partially or completely), then an additional ``errors_1.jsonl``, - ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same "ID" : "" but - here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. - - - For Image Object Detection: In the created directory files - ``image_object_detection_1.jsonl``, - ``image_object_detection_2.jsonl``,...,\ ``image_object_detection_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - the successfully predicted images and annotations. Each .JSONL file - will contain, per line, a JSON representation of a proto that wraps - image's "ID" : "" followed by a list of zero or more - AnnotationPayload protos (called annotations), which have - image\_object\_detection detail populated. A single image will be - listed only once with all its annotations, and its annotations will - never be split across files. If prediction for any image failed - (partially or completely), then additional ``errors_1.jsonl``, - ``errors_2.jsonl``,..., ``errors_N.jsonl`` files will be created (N - depends on total number of failed predictions). These files will have - a JSON representation of a proto that wraps the same "ID" : "" but - here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``\ fields. \* For Video - Classification: In the created directory a video\_classification.csv - file, and a .JSON file per each video classification requested in the - input (i.e. each line in given CSV(s)), will be created. - - :: - - The format of video_classification.csv is: - - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END,JSON\_FILE\_NAME,STATUS - where: GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END = matches - 1 to 1 the prediction input lines (i.e. video\_classification.csv has - precisely the same number of lines as the prediction input had.) - JSON\_FILE\_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = "OK" - if prediction completed successfully, or an error code with message - otherwise. If STATUS is not "OK" then the .JSON file for that line may - not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for the video time segment the file is assigned to in the - video_classification.csv. 
All AnnotationPayload protos will have - video_classification field set, and will be sorted by - video_classification.type field (note that the returned types are - governed by `classifaction_types` parameter in - [PredictService.BatchPredictRequest.params][]). - - - For Video Object Tracking: In the created directory a - video\_object\_tracking.csv file will be created, and multiple files - video\_object\_trackinng\_1.json, - video\_object\_trackinng\_2.json,..., - video\_object\_trackinng\_N.json, where N is the number of requests - in the input (i.e. the number of lines in given CSV(s)). - - :: - - The format of video_object_tracking.csv is: - - GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END,JSON\_FILE\_NAME,STATUS - where: GCS\_FILE\_PATH,TIME\_SEGMENT\_START,TIME\_SEGMENT\_END = matches - 1 to 1 the prediction input lines (i.e. video\_object\_tracking.csv has - precisely the same number of lines as the prediction input had.) - JSON\_FILE\_NAME = Name of .JSON file in the output directory, which - contains prediction responses for the video time segment. STATUS = "OK" - if prediction completed successfully, or an error code with message - otherwise. If STATUS is not "OK" then the .JSON file for that line may - not exist or be empty. - - :: - - Each .JSON file, assuming STATUS is "OK", will contain a list of - AnnotationPayload protos in JSON format, which are the predictions - for each frame of the video time segment the file is assigned to in - video_object_tracking.csv. All AnnotationPayload protos will have - video_object_tracking field set. - - - For Text Classification: In the created directory files - ``text_classification_1.jsonl``, - ``text_classification_2.jsonl``,...,\ ``text_classification_N.jsonl`` - will be created, where N may be 1, and depends on the total number of - inputs and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have classification detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. - - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Sentiment: In the created directory files - ``text_sentiment_1.jsonl``, - ``text_sentiment_2.jsonl``,...,\ ``text_sentiment_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. - - :: - - Each .JSONL file will contain, per line, a JSON representation of a - proto that wraps input text snippet or input text file and a list of - zero or more AnnotationPayload protos (called annotations), which - have text_sentiment detail populated. A single text snippet or file - will be listed only once with all its annotations, and its - annotations will never be split across files. 
- - If prediction for any text snippet or file failed (partially or - completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,..., - `errors_N.jsonl` files will be created (N depends on total number of - failed predictions). These files will have a JSON representation of a - proto that wraps input text snippet or input text file followed by - exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Text Extraction: In the created directory files - ``text_extraction_1.jsonl``, - ``text_extraction_2.jsonl``,...,\ ``text_extraction_N.jsonl`` will be - created, where N may be 1, and depends on the total number of inputs - and annotations found. The contents of these .JSONL file(s) depend on - whether the input used inline text, or documents. If input was - inline, then each .JSONL file will contain, per line, a JSON - representation of a proto that wraps given in request text snippet's - "id" (if specified), followed by input text snippet, and a list of - zero or more AnnotationPayload protos (called annotations), which - have text\_extraction detail populated. A single text snippet will be - listed only once with all its annotations, and its annotations will - never be split across files. If input used documents, then each - .JSONL file will contain, per line, a JSON representation of a proto - that wraps given in request document proto, followed by its OCR-ed - representation in the form of a text snippet, finally followed by a - list of zero or more AnnotationPayload protos (called annotations), - which have text\_extraction detail populated and refer, via their - indices, to the OCR-ed text snippet. A single document (and its text - snippet) will be listed only once with all its annotations, and its - annotations will never be split across files. If prediction for any - text snippet failed (partially or completely), then additional - ``errors_1.jsonl``, ``errors_2.jsonl``,..., ``errors_N.jsonl`` files - will be created (N depends on total number of failed predictions). - These files will have a JSON representation of a proto that wraps - either the "id" : "" (in case of inline) or the document proto (in - case of document) but here followed by exactly one - - ```google.rpc.Status`` `__ - containing only ``code`` and ``message``. - - - For Tables: Output depends on whether - - [gcs\_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs\_destination] - or - - [bigquery\_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery\_destination] - is set (either is allowed). GCS case: In the created directory files - ``tables_1.csv``, ``tables_2.csv``,..., ``tables_N.csv`` will be - created, where N may be 1, and depends on the total number of the - successfully predicted rows. For all CLASSIFICATION - - [prediction\_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: - Each .csv file will contain a header, listing all columns' - - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] - given on input followed by M target column names in the format of - - "<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] - - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>\_\_score" - where M is the number of distinct target values, i.e. number of distinct - values in the target column of the table used to train the model. 
- Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, columns having the - corresponding prediction + { + "DESCRIPTOR": _BATCHPREDICTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Output configuration for BatchPredict Action. As destination the [gc + s_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gc + s_destination] must be set unless specified otherwise for a domain. If + gcs_destination is set then in the given directory a new directory is + created. Its name will be “prediction--”, where timestamp is in YYYY- + MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it depends on the + ML problem the predictions are made for. - For Image Classification: + In the created directory files ``image_classification_1.jsonl``, + ``image_classification_2.jsonl``,…,\ ``image_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + the successfully predicted images and annotations. A single image + will be listed only once with all its annotations, and its + annotations will never be split across files. Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + image’s “ID” : “” followed by a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. If prediction for any image failed + (partially or completely), then an additional ``errors_1.jsonl``, + ``errors_2.jsonl``,…, ``errors_N.jsonl`` files will be created (N + depends on total number of failed predictions). These files will have + a JSON representation of a proto that wraps the same “ID” : “” but + here followed by exactly one ```google.rpc.Status`` `_\_ + containing only ``code`` and ``message``\ fields. - For Image Object + Detection: In the created directory files + ``image_object_detection_1.jsonl``, + ``image_object_detection_2.jsonl``,…,\ + ``image_object_detection_N.jsonl`` will be created, where N may be + 1, and depends on the total number of the successfully predicted + images and annotations. Each .JSONL file will contain, per line, a + JSON representation of a proto that wraps image’s “ID” : “” + followed by a list of zero or more AnnotationPayload protos (called + annotations), which have image_object_detection detail populated. A + single image will be listed only once with all its annotations, and + its annotations will never be split across files. If prediction for + any image failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + the same “ID” : “” but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``\ fields. \* For Video Classification: In the created + directory a video_classification.csv file, and a .JSON file per each + video classification requested in the input (i.e. each line in given + CSV(s)), will be created. :: The format of + video_classification.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SE + GMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_classification.csv has precisely + the same number of lines as the prediction input had.) 
JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message + otherwise. If STATUS is not “OK” then the .JSON file for that line may + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for the video time segment + the file is assigned to in the video_classification.csv. All + AnnotationPayload protos will have video_classification field + set, and will be sorted by video_classification.type field + (note that the returned types are governed by + `classifaction_types` parameter in + [PredictService.BatchPredictRequest.params][]). - For Video Object + Tracking: In the created directory a video_object_tracking.csv file + will be created, and multiple files video_object_trackinng_1.json, + video_object_trackinng_2.json,…, video_object_trackinng_N.json, + where N is the number of requests in the input (i.e. the number of + lines in given CSV(s)). :: The format of + video_object_tracking.csv is: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_S + EGMENT_END,JSON_FILE_NAME,STATUS where: + GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1 the + prediction input lines (i.e. video_object_tracking.csv has precisely + the same number of lines as the prediction input had.) JSON_FILE_NAME + = Name of .JSON file in the output directory, which contains + prediction responses for the video time segment. STATUS = “OK” if + prediction completed successfully, or an error code with message + otherwise. If STATUS is not “OK” then the .JSON file for that line may + not exist or be empty. :: Each .JSON file, assuming STATUS is + "OK", will contain a list of AnnotationPayload protos in JSON + format, which are the predictions for each frame of the video + time segment the file is assigned to in + video_object_tracking.csv. All AnnotationPayload protos will have + video_object_tracking field set. - For Text Classification: In the + created directory files ``text_classification_1.jsonl``, + ``text_classification_2.jsonl``,…,\ ``text_classification_N.jsonl`` + will be created, where N may be 1, and depends on the total number of + inputs and annotations found. :: Each .JSONL file will + contain, per line, a JSON representation of a proto that wraps + input text snippet or input text file and a list of zero or more + AnnotationPayload protos (called annotations), which have + classification detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any text snippet or file failed (partially or completely), then + additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one ```google.rpc.Status`` `__ containing only + ``code`` and ``message``. - For Text Sentiment: In the created + directory files ``text_sentiment_1.jsonl``, + ``text_sentiment_2.jsonl``,…,\ ``text_sentiment_N.jsonl`` will be + created, where N may be 1, and depends on the total number of inputs + and annotations found. 
:: Each .JSONL file will contain, + per line, a JSON representation of a proto that wraps input text + snippet or input text file and a list of zero or more + AnnotationPayload protos (called annotations), which have + text_sentiment detail populated. A single text snippet or file + will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for + any text snippet or file failed (partially or completely), then + additional `errors_1.jsonl`, `errors_2.jsonl`,..., + `errors_N.jsonl` files will be created (N depends on total number of + failed predictions). These files will have a JSON representation of a + proto that wraps input text snippet or input text file followed by + exactly one ```google.rpc.Status`` `__ containing only + ``code`` and ``message``. - For Text Extraction: In the created + directory files ``text_extraction_1.jsonl``, + ``text_extraction_2.jsonl``,…,\ ``text_extraction_N.jsonl`` will be + created, where N may be 1, and depends on the total number of inputs + and annotations found. The contents of these .JSONL file(s) depend on + whether the input used inline text, or documents. If input was + inline, then each .JSONL file will contain, per line, a JSON + representation of a proto that wraps given in request text snippet’s + “id” (if specified), followed by input text snippet, and a list of + zero or more AnnotationPayload protos (called annotations), which + have text_extraction detail populated. A single text snippet will be + listed only once with all its annotations, and its annotations will + never be split across files. If input used documents, then each + .JSONL file will contain, per line, a JSON representation of a proto + that wraps given in request document proto, followed by its OCR-ed + representation in the form of a text snippet, finally followed by a + list of zero or more AnnotationPayload protos (called annotations), + which have text_extraction detail populated and refer, via their + indices, to the OCR-ed text snippet. A single document (and its text + snippet) will be listed only once with all its annotations, and its + annotations will never be split across files. If prediction for any + text snippet failed (partially or completely), then additional + ``errors_1.jsonl``, ``errors_2.jsonl``,…, ``errors_N.jsonl`` files + will be created (N depends on total number of failed predictions). + These files will have a JSON representation of a proto that wraps + either the “id” : “” (in case of inline) or the document proto (in + case of document) but here followed by exactly one + ```google.rpc.Status`` `__ containing only ``code`` and + ``message``. - For Tables: Output depends on whether [gcs_destinati + on][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destinati + on] or [bigquery_destination][google.cloud.automl.v1beta1.BatchPredic + tOutputConfig.bigquery_destination] is set (either is allowed). GCS + case: In the created directory files ``tables_1.csv``, + ``tables_2.csv``,…, ``tables_N.csv`` will be created, where N may be + 1, and depends on the total number of the successfully predicted rows. 
+ For all CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta + 1.TablesModelMetadata.prediction_type]: Each .csv file will contain a + header, listing all columns’ + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + given on input followed by M target column names in the format of "<[ + target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.t + arget_column_spec] [display_name][google.cloud.automl.v1beta1.ColumnS + pec.display_name]>\_\_score" where M is the number of distinct target + values, i.e. number of distinct values in the target column of the + table used to train the model. Subsequent lines will contain the + respective values of successfully predicted rows, with the last, + i.e. the target, columns having the corresponding prediction [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. For - REGRESSION and FORECASTING - - [prediction\_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction\_type]: - Each .csv file will contain a header, listing all columns' - [display\_name-s][google.cloud.automl.v1beta1.display\_name] given on - input followed by the predicted target column with name in the format of - - "predicted\_<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] - - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>" + REGRESSION and FORECASTING [prediction_type-s][google.cloud.automl.v1 + beta1.TablesModelMetadata.prediction_type]: Each .csv file will + contain a header, listing all columns’ + [display_name-s][google.cloud.automl.v1beta1.display_name] given on + input followed by the predicted target column with name in the format + of "predicted_<[target_column_specs][google.cloud.automl.v1beta1.Tabl + esModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" Subsequent lines will contain the respective values of successfully - predicted rows, with the last, i.e. the target, column having the + predicted rows, with the last, i.e. the target, column having the predicted target value. If prediction for any rows failed, then an - additional ``errors_1.csv``, ``errors_2.csv``,..., ``errors_N.csv`` will - be created (N depends on total number of failed rows). These files will - have analogous format as ``tables_*.csv``, but always with a single - target column having - - ```google.rpc.Status`` `__ + additional ``errors_1.csv``, ``errors_2.csv``,…, ``errors_N.csv`` will + be created (N depends on total number of failed rows). These files + will have analogous format as ``tables_*.csv``, but always with a + single target column having ```google.rpc.Status`` `_\_ represented as a JSON string, and containing only ``code`` and - ``message``. BigQuery case: - - [bigquery\_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery\_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - ``prediction__`` where - will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in - YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In the dataset - two tables will be created, ``predictions``, and ``errors``. 
The - ``predictions`` table's column names will be the input columns' - - [display\_name-s][google.cloud.automl.v1beta1.ColumnSpec.display\_name] - followed by the target column with name in the format of - - "predicted\_<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] - - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>" + ``message``. BigQuery case: [bigquery_destination][google.cloud.autom + l.v1beta1.OutputConfig.bigquery_destination] pointing to a BigQuery + project must be set. In the given project a new dataset will be + created with name ``prediction__`` where will be made BigQuery-dataset-name compatible + (e.g. most special characters will become underscores), and timestamp + will be in YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the + dataset two tables will be created, ``predictions``, and ``errors``. + The ``predictions`` table’s column names will be the input columns’ + [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] + followed by the target column with name in the format of "predicted_< + [target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata. + target_column_spec] + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" The input feature columns will contain the respective values of successfully predicted rows, with the target column having an ARRAY of - [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. The ``errors`` table contains rows for which the prediction has failed, it has analogous input columns while the target column name is in the - format of - - "errors\_<[target\_column\_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spec] - - [display\_name][google.cloud.automl.v1beta1.ColumnSpec.display\_name]>", - and as a value has - - ```google.rpc.Status`` `__ - represented as a STRUCT, and containing only ``code`` and ``message``. - + format of "errors_<[target_column_specs][google.cloud.automl.v1beta1. + TablesModelMetadata.target_column_spec] + [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", + and as a value has ```google.rpc.Status`` `__ represented + as a STRUCT, and containing only ``code`` and ``message``. Attributes: destination: @@ -1687,27 +1651,26 @@ The BigQuery location where the output is to be written to. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOutputConfig) - ), + }, ) _sym_db.RegisterMessage(BatchPredictOutputConfig) ModelExportOutputConfig = _reflection.GeneratedProtocolMessageType( "ModelExportOutputConfig", (_message.Message,), - dict( - ParamsEntry=_reflection.GeneratedProtocolMessageType( + { + "ParamsEntry": _reflection.GeneratedProtocolMessageType( "ParamsEntry", (_message.Message,), - dict( - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, - __module__="google.cloud.automl_v1beta1.proto.io_pb2" + { + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG_PARAMSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelExportOutputConfig.ParamsEntry) - ), + }, ), - DESCRIPTOR=_MODELEXPORTOUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Output configuration for ModelExport Action. 
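As context for the BatchPredictOutputConfig docstring regenerated above, the location of the JSONL and CSV files it describes is chosen by setting exactly one of the destination fields on that message. A minimal, hedged sketch follows; the bucket and project names are placeholders, not values taken from this change:

    from google.cloud.automl_v1beta1.proto import io_pb2

    # GCS case: tables_1.csv ... tables_N.csv (and errors_*.csv on failure)
    # are written under the given directory prefix.
    gcs_output = io_pb2.BatchPredictOutputConfig(
        gcs_destination=io_pb2.GcsDestination(output_uri_prefix="gs://my-bucket/predictions")
    )

    # BigQuery case: a new prediction_* dataset with `predictions` and `errors`
    # tables is created in the given project, as described in the docstring.
    bq_output = io_pb2.BatchPredictOutputConfig(
        bigquery_destination=io_pb2.BigQueryDestination(output_uri="bq://my-project")
    )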
- + "DESCRIPTOR": _MODELEXPORTOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Output configuration for ModelExport Action. Attributes: destination: @@ -1715,49 +1678,49 @@ gcs_destination: The Google Cloud Storage location where the model is to be written to. This location may only be set for the following - model formats: "tflite", "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js", "core\_ml". Under the directory - given as the destination a new one with name "model-export--", - where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 - format, will be created. Inside the model and any of its - supporting files will be written. + model formats: “tflite”, “edgetpu_tflite”, “tf_saved_model”, + “tf_js”, “core_ml”. Under the directory given as the + destination a new one with name “model-export--”, where + timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will + be created. Inside the model and any of its supporting files + will be written. gcr_destination: The GCR location where model image is to be pushed to. This location may only be set for the following model formats: - "docker". The model image will be created under the given + “docker”. The model image will be created under the given URI. model_format: The format in which the model must be exported. The available, and default, formats depend on the problem and model type (if - given problem and type combination doesn't have a format + given problem and type combination doesn’t have a format listed, it means its models are not exportable): - For Image Classification mobile-low-latency-1, mobile-versatile-1, - mobile-high-accuracy-1: "tflite" (default), "edgetpu\_tflite", - "tf\_saved\_model", "tf\_js", "docker". - For Image + mobile-high-accuracy-1: “tflite” (default), “edgetpu_tflite”, + “tf_saved_model”, “tf_js”, “docker”. - For Image Classification mobile-core-ml-low-latency-1, mobile-core- - ml-versatile-1, mobile-core-ml-high-accuracy-1: "core\_ml" + ml-versatile-1, mobile-core-ml-high-accuracy-1: “core_ml” (default). Formats description: - tflite - Used for Android - mobile devices. - edgetpu\_tflite - Used for `Edge TPU + mobile devices. - edgetpu_tflite - Used for `Edge TPU `__ devices. - - tf\_saved\_model - A tensorflow model in SavedModel format. - - tf\_js - A `TensorFlow.js `__ + tf_saved_model - A tensorflow model in SavedModel format. - + tf_js - A `TensorFlow.js `_\_ model that can be used in the browser and in Node.js using JavaScript. - docker - Used for Docker containers. Use the params field to customize the container. The container is verified to work correctly on ubuntu 16.04 operating system. See more at [containers quickstart](https: //cloud.google.com/vision/automl/docs/containers-gcs- - quickstart) \* core\_ml - Used for iOS mobile devices. + quickstart) \* core_ml - Used for iOS mobile devices. params: Additional model-type and format specific parameters describing the requirements for the to be exported model files, any string must be up to 25000 characters long. - For - ``docker`` format: ``cpu_architecture`` - (string) "x86\_64" - (default). ``gpu_architecture`` - (string) "none" (default), - "nvidia". + ``docker`` format: ``cpu_architecture`` - (string) “x86_64” + (default). ``gpu_architecture`` - (string) “none” (default), + “nvidia”. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelExportOutputConfig) - ), + }, ) _sym_db.RegisterMessage(ModelExportOutputConfig) _sym_db.RegisterMessage(ModelExportOutputConfig.ParamsEntry) @@ -1765,38 +1728,32 @@ ExportEvaluatedExamplesOutputConfig = _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesOutputConfig", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""Output configuration for ExportEvaluatedExamples Action. - Note that this call is available only for 30 days since the moment the - model was evaluated. The output depends on the domain, as follows (note - that only examples from the TEST set are exported): - - - For Tables: - - [bigquery\_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery\_destination] - pointing to a BigQuery project must be set. In the given project a new - dataset will be created with name - - ``export_evaluated_examples__`` - where will be made BigQuery-dataset-name compatible (e.g. most special - characters will become underscores), and timestamp will be in - YYYY\_MM\_DDThh\_mm\_ss\_sssZ "based on ISO-8601" format. In the dataset - an ``evaluated_examples`` table will be created. It will have all the - same columns as the - - [primary\_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary\_table\_spec\_id] - of the [dataset][google.cloud.automl.v1beta1.Model.dataset\_id] from - which the model was created, as they were at the moment of model's - evaluation (this includes the target column with its ground truth), - followed by a column called "predicted\_". That last column will contain - the model's prediction result for each respective row, given as ARRAY of + { + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOUTPUTCONFIG, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """Output configuration for ExportEvaluatedExamples Action. Note that + this call is available only for 30 days since the moment the model was + evaluated. The output depends on the domain, as follows (note that + only examples from the TEST set are exported): - For Tables: [bigqu + ery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_des + tination] pointing to a BigQuery project must be set. In the given + project a new dataset will be created with name + ``export_evaluated_examples__`` where will be made BigQuery-dataset-name compatible (e.g. most + special characters will become underscores), and timestamp will be in + YYYY_MM_DDThh_mm_ss_sssZ “based on ISO-8601” format. In the dataset an + ``evaluated_examples`` table will be created. It will have all the + same columns as the [primary_table][google.cloud.automl.v1beta1.Table + sDatasetMetadata.primary_table_spec_id] of the + [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which the + model was created, as they were at the moment of model’s evaluation + (this includes the target column with its ground truth), followed by a + column called “predicted\_”. That last column will contain the model’s + prediction result for each respective row, given as ARRAY of [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload], represented as STRUCT-s, containing [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. - Attributes: destination: Required. The destination of the output. @@ -1804,59 +1761,56 @@ The BigQuery location where the output is to be written to. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig) - ), + }, ) _sym_db.RegisterMessage(ExportEvaluatedExamplesOutputConfig) GcsSource = _reflection.GeneratedProtocolMessageType( "GcsSource", (_message.Message,), - dict( - DESCRIPTOR=_GCSSOURCE, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The Google Cloud Storage location for the input content. - + { + "DESCRIPTOR": _GCSSOURCE, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location for the input content. Attributes: input_uris: Required. Google Cloud Storage URIs to input files, up to 2000 - characters long. Accepted forms: \* Full object path, e.g. - gs://bucket/directory/object.csv + characters long. Accepted forms: \* Full object path, + e.g. gs://bucket/directory/object.csv """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcsSource) - ), + }, ) _sym_db.RegisterMessage(GcsSource) BigQuerySource = _reflection.GeneratedProtocolMessageType( "BigQuerySource", (_message.Message,), - dict( - DESCRIPTOR=_BIGQUERYSOURCE, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The BigQuery location for the input content. - + { + "DESCRIPTOR": _BIGQUERYSOURCE, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The BigQuery location for the input content. Attributes: input_uri: Required. BigQuery URI to a table, up to 2000 characters long. - Accepted forms: \* BigQuery path e.g. - bq://projectId.bqDatasetId.bqTableId + Accepted forms: \* BigQuery path + e.g. bq://projectId.bqDatasetId.bqTableId """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BigQuerySource) - ), + }, ) _sym_db.RegisterMessage(BigQuerySource) GcsDestination = _reflection.GeneratedProtocolMessageType( "GcsDestination", (_message.Message,), - dict( - DESCRIPTOR=_GCSDESTINATION, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The Google Cloud Storage location where the output is to - be written to. - + { + "DESCRIPTOR": _GCSDESTINATION, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The Google Cloud Storage location where the output is to be written + to. Attributes: output_uri_prefix: @@ -1864,53 +1818,51 @@ 2000 characters long. Accepted forms: \* Prefix path: gs://bucket/directory The requesting user must have write permission to the bucket. The directory is created if it - doesn't exist. + doesn’t exist. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcsDestination) - ), + }, ) _sym_db.RegisterMessage(GcsDestination) BigQueryDestination = _reflection.GeneratedProtocolMessageType( "BigQueryDestination", (_message.Message,), - dict( - DESCRIPTOR=_BIGQUERYDESTINATION, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The BigQuery location for the output content. - + { + "DESCRIPTOR": _BIGQUERYDESTINATION, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The BigQuery location for the output content. Attributes: output_uri: Required. BigQuery URI to a project, up to 2000 characters - long. Accepted forms: \* BigQuery path e.g. bq://projectId + long. Accepted forms: \* BigQuery path e.g. 
bq://projectId """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BigQueryDestination) - ), + }, ) _sym_db.RegisterMessage(BigQueryDestination) GcrDestination = _reflection.GeneratedProtocolMessageType( "GcrDestination", (_message.Message,), - dict( - DESCRIPTOR=_GCRDESTINATION, - __module__="google.cloud.automl_v1beta1.proto.io_pb2", - __doc__="""The GCR location where the image must be pushed to. - + { + "DESCRIPTOR": _GCRDESTINATION, + "__module__": "google.cloud.automl_v1beta1.proto.io_pb2", + "__doc__": """The GCR location where the image must be pushed to. Attributes: output_uri: Required. Google Contained Registry URI of the new image, up to 2000 characters long. See https: //cloud.google.com/container-registry/do // cs/pushing-and- - pulling#pushing\_an\_image\_to\_a\_registry Accepted forms: \* + pulling#pushing_an_image_to_a_registry Accepted forms: \* [HOSTNAME]/[PROJECT-ID]/[IMAGE] \* [HOSTNAME]/[PROJECT- ID]/[IMAGE]:[TAG] The requesting user must have permission to push images the project. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GcrDestination) - ), + }, ) _sym_db.RegisterMessage(GcrDestination) diff --git a/google/cloud/automl_v1beta1/proto/model.proto b/google/cloud/automl_v1beta1/proto/model.proto index 8c53d9b5..2b2e8d73 100644 --- a/google/cloud/automl_v1beta1/proto/model.proto +++ b/google/cloud/automl_v1beta1/proto/model.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/image.proto"; import "google/cloud/automl/v1beta1/tables.proto"; import "google/cloud/automl/v1beta1/text.proto"; @@ -33,6 +33,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // API proto representing a trained machine learning model. message Model { + option (google.api.resource) = { + type: "automl.googleapis.com/Model" + pattern: "projects/{project}/locations/{location}/models/{model}" + }; + // Deployment state of the model. enum DeploymentState { // Should not be used, an un-set enum has this value by default. diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation.proto b/google/cloud/automl_v1beta1/proto/model_evaluation.proto index ce2db614..d5633fcd 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation.proto +++ b/google/cloud/automl_v1beta1/proto/model_evaluation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
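For quick reference, the accepted URI shapes spelled out in the GcsSource, BigQuerySource, GcsDestination, BigQueryDestination and GcrDestination docstrings above map onto the messages as in this hedged sketch (all bucket, project and image IDs are placeholders):

    from google.cloud.automl_v1beta1.proto import io_pb2

    src_gcs = io_pb2.GcsSource(input_uris=["gs://bucket/directory/object.csv"])       # full object path
    src_bq = io_pb2.BigQuerySource(input_uri="bq://projectId.bqDatasetId.bqTableId")  # BigQuery table path

    dst_gcs = io_pb2.GcsDestination(output_uri_prefix="gs://bucket/directory")  # prefix; created if missing
    dst_bq = io_pb2.BigQueryDestination(output_uri="bq://projectId")            # project-level BigQuery path
    dst_gcr = io_pb2.GcrDestination(output_uri="gcr.io/my-project/my-image")    # [HOSTNAME]/[PROJECT-ID]/[IMAGE]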
-// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/classification.proto"; import "google/cloud/automl/v1beta1/detection.proto"; import "google/cloud/automl/v1beta1/regression.proto"; @@ -35,6 +35,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Evaluation results of a model. message ModelEvaluation { + option (google.api.resource) = { + type: "automl.googleapis.com/ModelEvaluation" + pattern: "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}" + }; + // Output only. Problem type specific evaluation metrics. oneof metrics { // Model evaluation metrics for image, text, video and tables diff --git a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py index 681a2663..4dee2ad7 100644 --- a/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_evaluation_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/model_evaluation.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) @@ -44,13 +42,11 @@ name="google/cloud/automl_v1beta1/proto/model_evaluation.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n8google/cloud/automl_v1beta1/proto/model_evaluation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xa7\x07\n\x0fModelEvaluation\x12i\n!classification_evaluation_metrics\x18\x08 \x01(\x0b\x32<.google.cloud.automl.v1beta1.ClassificationEvaluationMetricsH\x00\x12\x61\n\x1dregression_evaluation_metrics\x18\x18 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.RegressionEvaluationMetricsH\x00\x12\x63\n\x1etranslation_evaluation_metrics\x18\t \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TranslationEvaluationMetricsH\x00\x12w\n)image_object_detection_evaluation_metrics\x18\x0c \x01(\x0b\x32\x42.google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetricsH\x00\x12u\n(video_object_tracking_evaluation_metrics\x18\x0e \x01(\x0b\x32\x41.google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetricsH\x00\x12h\n!text_sentiment_evaluation_metrics\x18\x0b \x01(\x0b\x32;.google.cloud.automl.v1beta1.TextSentimentEvaluationMetricsH\x00\x12j\n"text_extraction_evaluation_metrics\x18\r 
\x01(\x0b\x32<.google.cloud.automl.v1beta1.TextExtractionEvaluationMetricsH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\x12\x61nnotation_spec_id\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0f \x01(\t\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x17\x65valuated_example_count\x18\x06 \x01(\x05\x42\t\n\x07metricsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n8google/cloud/automl_v1beta1/proto/model_evaluation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x31google/cloud/automl_v1beta1/proto/detection.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a\x37google/cloud/automl_v1beta1/proto/text_extraction.proto\x1a\x36google/cloud/automl_v1beta1/proto/text_sentiment.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb1\x08\n\x0fModelEvaluation\x12i\n!classification_evaluation_metrics\x18\x08 \x01(\x0b\x32<.google.cloud.automl.v1beta1.ClassificationEvaluationMetricsH\x00\x12\x61\n\x1dregression_evaluation_metrics\x18\x18 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.RegressionEvaluationMetricsH\x00\x12\x63\n\x1etranslation_evaluation_metrics\x18\t \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.TranslationEvaluationMetricsH\x00\x12w\n)image_object_detection_evaluation_metrics\x18\x0c \x01(\x0b\x32\x42.google.cloud.automl.v1beta1.ImageObjectDetectionEvaluationMetricsH\x00\x12u\n(video_object_tracking_evaluation_metrics\x18\x0e \x01(\x0b\x32\x41.google.cloud.automl.v1beta1.VideoObjectTrackingEvaluationMetricsH\x00\x12h\n!text_sentiment_evaluation_metrics\x18\x0b \x01(\x0b\x32;.google.cloud.automl.v1beta1.TextSentimentEvaluationMetricsH\x00\x12j\n"text_extraction_evaluation_metrics\x18\r \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextExtractionEvaluationMetricsH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1a\n\x12\x61nnotation_spec_id\x18\x02 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x0f \x01(\t\x12/\n\x0b\x63reate_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x1f\n\x17\x65valuated_example_count\x18\x06 \x01(\x05:\x87\x01\xea\x41\x83\x01\n%automl.googleapis.com/ModelEvaluation\x12Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}B\t\n\x07metricsB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_detection__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2.DESCRIPTOR, @@ -70,6 +66,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="classification_evaluation_metrics", @@ -88,6 +85,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="regression_evaluation_metrics", @@ -106,6 +104,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="translation_evaluation_metrics", @@ -124,6 +123,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection_evaluation_metrics", @@ -142,6 +142,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_object_tracking_evaluation_metrics", @@ -160,6 +161,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment_evaluation_metrics", @@ -178,6 +180,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction_evaluation_metrics", @@ -196,6 +199,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -206,7 +210,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -214,6 +218,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -224,7 +229,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -232,6 +237,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -242,7 +248,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -250,6 +256,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -268,6 +275,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="evaluated_example_count", @@ -286,12 +294,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + serialized_options=b"\352A\203\001\n%automl.googleapis.com/ModelEvaluation\022Zprojects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -301,11 +310,12 @@ full_name="google.cloud.automl.v1beta1.ModelEvaluation.metrics", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=526, - serialized_end=1461, + serialized_start=553, + 
serialized_end=1626, ) _MODELEVALUATION.fields_by_name[ @@ -394,11 +404,10 @@ ModelEvaluation = _reflection.GeneratedProtocolMessageType( "ModelEvaluation", (_message.Message,), - dict( - DESCRIPTOR=_MODELEVALUATION, - __module__="google.cloud.automl_v1beta1.proto.model_evaluation_pb2", - __doc__="""Evaluation results of a model. - + { + "DESCRIPTOR": _MODELEVALUATION, + "__module__": "google.cloud.automl_v1beta1.proto.model_evaluation_pb2", + "__doc__": """Evaluation results of a model. Attributes: metrics: @@ -430,40 +439,41 @@ evaluation applies to. The The ID is empty for the overall model evaluation. For Tables annotation specs in the dataset do not exist and this ID is always not set, but for - CLASSIFICATION [prediction\_type-s][google.cloud.automl.v1bet - a1.TablesModelMetadata.prediction\_type] the [display\_name][g - oogle.cloud.automl.v1beta1.ModelEvaluation.display\_name] - field is used. + CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta + 1.TablesModelMetadata.prediction_type] the [display_name][goog + le.cloud.automl.v1beta1.ModelEvaluation.display_name] field is + used. display_name: - Output only. The value of [display\_name][google.cloud.automl. - v1beta1.AnnotationSpec.display\_name] at the moment when the + Output only. The value of [display_name][google.cloud.automl.v + 1beta1.AnnotationSpec.display_name] at the moment when the model was trained. Because this field returns a value at model training time, for different models trained from the same dataset, the values may differ, since display names could had - been changed between the two model's trainings. For Tables - CLASSIFICATION [prediction\_type-s][google.cloud.automl.v1bet - a1.TablesModelMetadata.prediction\_type] distinct values of - the target column at the moment of the model evaluation are - populated here. The display\_name is empty for the overall + been changed between the two model’s trainings. For Tables + CLASSIFICATION [prediction_type-s][google.cloud.automl.v1beta + 1.TablesModelMetadata.prediction_type] distinct values of the + target column at the moment of the model evaluation are + populated here. The display_name is empty for the overall model evaluation. create_time: Output only. Timestamp when this model evaluation was created. evaluated_example_count: Output only. The number of examples used for model evaluation, - i.e. for which ground truth from time of model creation is + i.e. for which ground truth from time of model creation is compared against the predicted annotations created by the - model. For overall ModelEvaluation (i.e. with - annotation\_spec\_id not set) this is the total number of all + model. For overall ModelEvaluation (i.e. with + annotation_spec_id not set) this is the total number of all examples used for evaluation. Otherwise, this is the count of examples that according to the ground truth were annotated by - the [annotation\_spec\_id][google.cloud.automl.v1beta1.ModelE - valuation.annotation\_spec\_id]. + the [annotation_spec_id][google.cloud.automl.v1beta1.ModelEva + luation.annotation_spec_id]. 
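The google.api.resource options introduced by this change (see the model.proto and model_evaluation.proto hunks above, and the corresponding serialized_options) declare resource-name patterns for Model and ModelEvaluation. Composing such a name by hand looks like the hedged sketch below; the project, location and IDs are placeholders:

    # Patterns added in this change:
    #   automl.googleapis.com/Model:           projects/{project}/locations/{location}/models/{model}
    #   automl.googleapis.com/ModelEvaluation: .../models/{model}/modelEvaluations/{model_evaluation}
    project, location, model_id, evaluation_id = "my-project", "us-central1", "TBL123", "456"

    model_name = f"projects/{project}/locations/{location}/models/{model_id}"
    evaluation_name = f"{model_name}/modelEvaluations/{evaluation_id}"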
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ModelEvaluation) - ), + }, ) _sym_db.RegisterMessage(ModelEvaluation) DESCRIPTOR._options = None +_MODELEVALUATION._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/model_pb2.py b/google/cloud/automl_v1beta1/proto/model_pb2.py index f669af20..cf935cca 100644 --- a/google/cloud/automl_v1beta1/proto/model_pb2.py +++ b/google/cloud/automl_v1beta1/proto/model_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/model.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( image_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2, ) @@ -38,13 +36,11 @@ name="google/cloud/automl_v1beta1/proto/model.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n-google/cloud/automl_v1beta1/proto/model.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xf2\t\n\x05Model\x12[\n\x1atranslation_model_metadata\x18\x0f \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TranslationModelMetadataH\x00\x12l\n#image_classification_model_metadata\x18\r \x01(\x0b\x32=.google.cloud.automl.v1beta1.ImageClassificationModelMetadataH\x00\x12j\n"text_classification_model_metadata\x18\x0e \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextClassificationModelMetadataH\x00\x12o\n%image_object_detection_model_metadata\x18\x14 \x01(\x0b\x32>.google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadataH\x00\x12l\n#video_classification_model_metadata\x18\x17 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoClassificationModelMetadataH\x00\x12m\n$video_object_tracking_model_metadata\x18\x15 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadataH\x00\x12\x62\n\x1etext_extraction_model_metadata\x18\x13 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.TextExtractionModelMetadataH\x00\x12Q\n\x15tables_model_metadata\x18\x18 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.TablesModelMetadataH\x00\x12`\n\x1dtext_sentiment_model_metadata\x18\x16 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TextSentimentModelMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x12\n\ndataset_id\x18\x03 \x01(\t\x12/\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x10\x64\x65ployment_state\x18\x08 \x01(\x0e\x32\x32.google.cloud.automl.v1beta1.Model.DeploymentState"Q\n\x0f\x44\x65ploymentState\x12 
\n\x1c\x44\x45PLOYMENT_STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x44\x45PLOYED\x10\x01\x12\x0e\n\nUNDEPLOYED\x10\x02\x42\x10\n\x0emodel_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/model.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a.google/cloud/automl_v1beta1/proto/tables.proto\x1a,google/cloud/automl_v1beta1/proto/text.proto\x1a\x33google/cloud/automl_v1beta1/proto/translation.proto\x1a-google/cloud/automl_v1beta1/proto/video.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xcc\n\n\x05Model\x12[\n\x1atranslation_model_metadata\x18\x0f \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.TranslationModelMetadataH\x00\x12l\n#image_classification_model_metadata\x18\r \x01(\x0b\x32=.google.cloud.automl.v1beta1.ImageClassificationModelMetadataH\x00\x12j\n"text_classification_model_metadata\x18\x0e \x01(\x0b\x32<.google.cloud.automl.v1beta1.TextClassificationModelMetadataH\x00\x12o\n%image_object_detection_model_metadata\x18\x14 \x01(\x0b\x32>.google.cloud.automl.v1beta1.ImageObjectDetectionModelMetadataH\x00\x12l\n#video_classification_model_metadata\x18\x17 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoClassificationModelMetadataH\x00\x12m\n$video_object_tracking_model_metadata\x18\x15 \x01(\x0b\x32=.google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadataH\x00\x12\x62\n\x1etext_extraction_model_metadata\x18\x13 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.TextExtractionModelMetadataH\x00\x12Q\n\x15tables_model_metadata\x18\x18 \x01(\x0b\x32\x30.google.cloud.automl.v1beta1.TablesModelMetadataH\x00\x12`\n\x1dtext_sentiment_model_metadata\x18\x16 \x01(\x0b\x32\x37.google.cloud.automl.v1beta1.TextSentimentModelMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x64isplay_name\x18\x02 \x01(\t\x12\x12\n\ndataset_id\x18\x03 \x01(\t\x12/\n\x0b\x63reate_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x0b \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12L\n\x10\x64\x65ployment_state\x18\x08 \x01(\x0e\x32\x32.google.cloud.automl.v1beta1.Model.DeploymentState"Q\n\x0f\x44\x65ploymentState\x12 \n\x1c\x44\x45PLOYMENT_STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x44\x45PLOYED\x10\x01\x12\x0e\n\nUNDEPLOYED\x10\x02:X\xea\x41U\n\x1b\x61utoml.googleapis.com/Model\x12\x36projects/{project}/locations/{location}/models/{model}B\x10\n\x0emodel_metadataB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_image__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_tables__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__pb2.DESCRIPTOR, @@ -61,6 +57,7 @@ full_name="google.cloud.automl.v1beta1.Model.DeploymentState", filename=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, values=[ _descriptor.EnumValueDescriptor( name="DEPLOYMENT_STATE_UNSPECIFIED", @@ -68,18 +65,29 @@ number=0, serialized_options=None, type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="DEPLOYED", index=1, number=1, serialized_options=None, type=None + name="DEPLOYED", + index=1, + number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), _descriptor.EnumValueDescriptor( - name="UNDEPLOYED", index=2, number=2, serialized_options=None, type=None + name="UNDEPLOYED", + index=2, + number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key, ), ], containing_type=None, serialized_options=None, - serialized_start=1550, - serialized_end=1631, + serialized_start=1577, + serialized_end=1658, ) _sym_db.RegisterEnumDescriptor(_MODEL_DEPLOYMENTSTATE) @@ -90,6 +98,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translation_model_metadata", @@ -108,6 +117,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_classification_model_metadata", @@ -126,6 +136,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_classification_model_metadata", @@ -144,6 +155,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_object_detection_model_metadata", @@ -162,6 +174,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_classification_model_metadata", @@ -180,6 +193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="video_object_tracking_model_metadata", @@ -198,6 +212,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_extraction_model_metadata", @@ -216,6 +231,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_model_metadata", @@ -234,6 +250,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="text_sentiment_model_metadata", @@ -252,6 +269,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -262,7 +280,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -270,6 +288,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="display_name", @@ -280,7 +299,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -288,6 +307,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="dataset_id", @@ -298,7 +318,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -306,6 +326,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -324,6 +345,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -342,6 +364,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="deployment_state", @@ -360,12 +383,13 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], - enum_types=[_MODEL_DEPLOYMENTSTATE], - serialized_options=None, + enum_types=[_MODEL_DEPLOYMENTSTATE,], + serialized_options=b"\352AU\n\033automl.googleapis.com/Model\0226projects/{project}/locations/{location}/models/{model}", is_extendable=False, syntax="proto3", extension_ranges=[], @@ -375,11 +399,12 @@ full_name="google.cloud.automl.v1beta1.Model.model_metadata", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=383, - serialized_end=1649, + serialized_start=410, + serialized_end=1766, ) _MODEL.fields_by_name[ @@ -495,11 +520,10 @@ Model = _reflection.GeneratedProtocolMessageType( "Model", (_message.Message,), - dict( - DESCRIPTOR=_MODEL, - __module__="google.cloud.automl_v1beta1.proto.model_pb2", - __doc__="""API proto representing a trained machine learning model. - + { + "DESCRIPTOR": _MODEL, + "__module__": "google.cloud.automl_v1beta1.proto.model_pb2", + "__doc__": """API proto representing a trained machine learning model. Attributes: model_metadata: @@ -530,7 +554,7 @@ display_name: Required. The name of the model to show in the interface. The name can be up to 32 characters long and can consist only of - ASCII Latin letters A-Z and a-z, underscores (\_), and ASCII + ASCII Latin letters A-Z and a-z, underscores (_), and ASCII digits 0-9. It must start with a letter. dataset_id: Required. The resource ID of the dataset used to create the @@ -546,10 +570,11 @@ serve prediction requests after it gets deployed. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.Model) - ), + }, ) _sym_db.RegisterMessage(Model) DESCRIPTOR._options = None +_MODEL._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/operations.proto b/google/cloud/automl_v1beta1/proto/operations.proto index 460321cc..cce3fedc 100644 --- a/google/cloud/automl_v1beta1/proto/operations.proto +++ b/google/cloud/automl_v1beta1/proto/operations.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,16 +11,18 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
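Tying the Model docstring above back to code, a hedged sketch of populating the user-settable fields; the IDs are placeholders, and the display_name follows the documented rule (up to 32 characters of ASCII letters, digits and underscores, starting with a letter):

    from google.cloud.automl_v1beta1.proto import model_pb2

    model = model_pb2.Model(
        display_name="my_tables_model_v1",   # letters, digits, underscores; starts with a letter
        dataset_id="TBL1234567890",          # placeholder resource ID of the training dataset
    )
    # One of the *_model_metadata oneof fields (for example tables_model_metadata)
    # selects the problem type when the model is created.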
-// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/io.proto"; +import "google/cloud/automl/v1beta1/model.proto"; +import "google/cloud/automl/v1beta1/model_evaluation.proto"; +import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -58,8 +60,7 @@ message OperationMetadata { ExportModelOperationMetadata export_model_details = 22; // Details of ExportEvaluatedExamples operation. - ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = - 26; + ExportEvaluatedExamplesOperationMetadata export_evaluated_examples_details = 26; } // Output only. Progress of operation. Range: [0, 100]. @@ -80,19 +81,29 @@ message OperationMetadata { } // Details of operations that perform deletes of any entities. -message DeleteOperationMetadata {} +message DeleteOperationMetadata { + +} // Details of DeployModel operation. -message DeployModelOperationMetadata {} +message DeployModelOperationMetadata { + +} // Details of UndeployModel operation. -message UndeployModelOperationMetadata {} +message UndeployModelOperationMetadata { + +} // Details of CreateModel operation. -message CreateModelOperationMetadata {} +message CreateModelOperationMetadata { + +} // Details of ImportData operation. -message ImportDataOperationMetadata {} +message ImportDataOperationMetadata { + +} // Details of ExportData operation. message ExportDataOperationMetadata { diff --git a/google/cloud/automl_v1beta1/proto/operations_pb2.py b/google/cloud/automl_v1beta1/proto/operations_pb2.py index d1b13233..a6811b08 100644 --- a/google/cloud/automl_v1beta1/proto/operations_pb2.py +++ b/google/cloud/automl_v1beta1/proto/operations_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
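The OperationMetadata message whose regenerated descriptor follows is what AutoML long-running operations carry. A hedged sketch of reading it from a raw google.longrunning Operation; the `op` variable is assumed to come from an earlier API call:

    from google.cloud.automl_v1beta1.proto import operations_pb2

    metadata = operations_pb2.OperationMetadata()
    op.metadata.Unpack(metadata)              # op.metadata is a protobuf Any on the raw Operation

    print(metadata.progress_percent)          # range [0, 100], per the proto comment
    print(metadata.WhichOneof("details"))     # e.g. "create_model_details" or "batch_predict_details"
    for failure in metadata.partial_failures: # google.rpc.Status entries for partial failures
        print(failure.code, failure.message)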
# source: google/cloud/automl_v1beta1/proto/operations.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,29 +12,36 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, ) +from google.cloud.automl_v1beta1.proto import ( + model_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2, +) +from google.cloud.automl_v1beta1.proto import ( + model_evaluation_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/operations.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 
\x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/operations.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x08\n\x11OperationMetadata\x12N\n\x0e\x64\x65lete_details\x18\x08 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.DeleteOperationMetadataH\x00\x12Y\n\x14\x64\x65ploy_model_details\x18\x18 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.DeployModelOperationMetadataH\x00\x12]\n\x16undeploy_model_details\x18\x19 \x01(\x0b\x32;.google.cloud.automl.v1beta1.UndeployModelOperationMetadataH\x00\x12Y\n\x14\x63reate_model_details\x18\n \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.CreateModelOperationMetadataH\x00\x12W\n\x13import_data_details\x18\x0f \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ImportDataOperationMetadataH\x00\x12[\n\x15\x62\x61tch_predict_details\x18\x10 \x01(\x0b\x32:.google.cloud.automl.v1beta1.BatchPredictOperationMetadataH\x00\x12W\n\x13\x65xport_data_details\x18\x15 \x01(\x0b\x32\x38.google.cloud.automl.v1beta1.ExportDataOperationMetadataH\x00\x12Y\n\x14\x65xport_model_details\x18\x16 \x01(\x0b\x32\x39.google.cloud.automl.v1beta1.ExportModelOperationMetadataH\x00\x12r\n!export_evaluated_examples_details\x18\x1a \x01(\x0b\x32\x45.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadataH\x00\x12\x18\n\x10progress_percent\x18\r \x01(\x05\x12,\n\x10partial_failures\x18\x02 \x03(\x0b\x32\x12.google.rpc.Status\x12/\n\x0b\x63reate_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12/\n\x0bupdate_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.TimestampB\t\n\x07\x64\x65tails"\x19\n\x17\x44\x65leteOperationMetadata"\x1e\n\x1c\x44\x65ployModelOperationMetadata" \n\x1eUndeployModelOperationMetadata"\x1e\n\x1c\x43reateModelOperationMetadata"\x1d\n\x1bImportDataOperationMetadata"\xef\x01\n\x1b\x45xportDataOperationMetadata\x12\x62\n\x0boutput_info\x18\x01 \x01(\x0b\x32M.google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo\x1al\n\x14\x45xportDataOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xc3\x02\n\x1d\x42\x61tchPredictOperationMetadata\x12J\n\x0cinput_config\x18\x01 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12\x66\n\x0boutput_info\x18\x02 \x01(\x0b\x32Q.google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo\x1an\n\x16\x42\x61tchPredictOutputInfo\x12\x1e\n\x14gcs_output_directory\x18\x01 \x01(\tH\x00\x12!\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tH\x00\x42\x11\n\x0foutput_location"\xbb\x01\n\x1c\x45xportModelOperationMetadata\x12\x64\n\x0boutput_info\x18\x02 \x01(\x0b\x32O.google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo\x1a\x35\n\x15\x45xportModelOutputInfo\x12\x1c\n\x14gcs_output_directory\x18\x01 \x01(\t"\xee\x01\n(ExportEvaluatedExamplesOperationMetadata\x12|\n\x0boutput_info\x18\x02 \x01(\x0b\x32g.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo\x1a\x44\n!ExportEvaluatedExamplesOutputInfo\x12\x1f\n\x17\x62igquery_output_dataset\x18\x02 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2.DESCRIPTOR, + google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, google_dot_rpc_dot_status__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -48,6 +52,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="delete_details", @@ -66,6 +71,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="deploy_model_details", @@ -84,6 +90,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="undeploy_model_details", @@ -102,6 +109,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_model_details", @@ -120,6 +128,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="import_data_details", @@ -138,6 +147,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="batch_predict_details", @@ -156,6 +166,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="export_data_details", @@ -174,6 +185,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="export_model_details", @@ -192,6 +204,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="export_evaluated_examples_details", @@ -210,6 +223,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="progress_percent", @@ -228,6 +242,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="partial_failures", @@ -246,6 +261,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="create_time", @@ -264,6 +280,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_time", @@ -282,6 +299,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -297,11 +315,12 @@ full_name="google.cloud.automl.v1beta1.OperationMetadata.details", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=216, - serialized_end=1251, + serialized_start=350, + serialized_end=1385, ) @@ -311,6 +330,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -320,8 +340,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1253, - serialized_end=1278, + serialized_start=1387, + serialized_end=1412, ) @@ -331,6 +351,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -340,8 +361,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1280, - serialized_end=1310, + serialized_start=1414, + serialized_end=1444, ) @@ -351,6 +372,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -360,8 +382,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1312, - serialized_end=1344, + serialized_start=1446, + serialized_end=1478, ) @@ -371,6 +393,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -380,8 +403,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1346, - serialized_end=1376, + serialized_start=1480, + serialized_end=1510, ) @@ -391,6 +414,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -400,8 +424,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1378, - serialized_end=1407, + serialized_start=1512, + serialized_end=1541, ) @@ -411,6 +435,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="gcs_output_directory", @@ -421,7 +446,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -429,6 +454,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_output_dataset", @@ -439,7 +465,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -447,6 +473,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -462,11 +489,12 @@ full_name="google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo.output_location", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=1541, - serialized_end=1649, + serialized_start=1675, + serialized_end=1783, ) _EXPORTDATAOPERATIONMETADATA = _descriptor.Descriptor( @@ -475,6 +503,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_info", @@ -493,18 +522,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO], + nested_types=[_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1410, - serialized_end=1649, + serialized_start=1544, + serialized_end=1783, ) @@ -514,6 +544,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_output_directory", @@ -524,7 +555,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -532,6 +563,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="bigquery_output_dataset", @@ -542,7 +574,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -550,6 +582,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -565,11 +598,12 @@ full_name="google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo.output_location", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=1865, - serialized_end=1975, + serialized_start=1999, + serialized_end=2109, ) _BATCHPREDICTOPERATIONMETADATA = _descriptor.Descriptor( @@ -578,6 +612,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="input_config", @@ -596,6 +631,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_info", @@ -614,18 +650,19 @@ 
extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO], + nested_types=[_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1652, - serialized_end=1975, + serialized_start=1786, + serialized_end=2109, ) @@ -635,6 +672,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="gcs_output_directory", @@ -645,7 +683,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -653,7 +691,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -663,8 +702,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2112, - serialized_end=2165, + serialized_start=2246, + serialized_end=2299, ) _EXPORTMODELOPERATIONMETADATA = _descriptor.Descriptor( @@ -673,6 +712,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_info", @@ -691,18 +731,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], - nested_types=[_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO], + nested_types=[_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1978, - serialized_end=2165, + serialized_start=2112, + serialized_end=2299, ) @@ -712,6 +753,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bigquery_output_dataset", @@ -722,7 +764,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -730,7 +772,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -740,8 +783,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2338, - serialized_end=2406, + serialized_start=2472, + serialized_end=2540, ) _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA = _descriptor.Descriptor( @@ -750,6 +793,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="output_info", @@ -768,11 +812,12 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[ - _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO + _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, ], enum_types=[], serialized_options=None, @@ -780,8 +825,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2168, - serialized_end=2406, + serialized_start=2302, + serialized_end=2540, ) _OPERATIONMETADATA.fields_by_name[ @@ -984,12 +1029,11 @@ 
OperationMetadata = _reflection.GeneratedProtocolMessageType( "OperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_OPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Metadata used across all long running operations returned - by AutoML API. - + { + "DESCRIPTOR": _OPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Metadata used across all long running operations returned by AutoML + API. Attributes: details: @@ -1019,7 +1063,7 @@ currently. partial_failures: Output only. Partial failures encountered. E.g. single files - that couldn't be read. This field should never exceed 20 + that couldn’t be read. This field should never exceed 20 entries. Status details field will contain standard GCP error details. create_time: @@ -1029,95 +1073,83 @@ time. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.OperationMetadata) - ), + }, ) _sym_db.RegisterMessage(OperationMetadata) DeleteOperationMetadata = _reflection.GeneratedProtocolMessageType( "DeleteOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_DELETEOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of operations that perform deletes of any - entities. - - """, + { + "DESCRIPTOR": _DELETEOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of operations that perform deletes of any entities.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(DeleteOperationMetadata) DeployModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "DeployModelOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_DEPLOYMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of DeployModel operation. - - """, + { + "DESCRIPTOR": _DEPLOYMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of DeployModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(DeployModelOperationMetadata) UndeployModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "UndeployModelOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_UNDEPLOYMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of UndeployModel operation. - - """, + { + "DESCRIPTOR": _UNDEPLOYMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of UndeployModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(UndeployModelOperationMetadata) CreateModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "CreateModelOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_CREATEMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of CreateModel operation. 
- - """, + { + "DESCRIPTOR": _CREATEMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of CreateModel operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(CreateModelOperationMetadata) ImportDataOperationMetadata = _reflection.GeneratedProtocolMessageType( "ImportDataOperationMetadata", (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDATAOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of ImportData operation. - - """, + { + "DESCRIPTOR": _IMPORTDATAOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of ImportData operation.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ImportDataOperationMetadata) ExportDataOperationMetadata = _reflection.GeneratedProtocolMessageType( "ExportDataOperationMetadata", (_message.Message,), - dict( - ExportDataOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "ExportDataOutputInfo": _reflection.GeneratedProtocolMessageType( "ExportDataOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes this export data's output. Supplements + { + "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA_EXPORTDATAOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes this export data’s output. Supplements [OutputConfig][google.cloud.automl.v1beta1.OutputConfig]. - Attributes: output_location: The output location to which the exported data is written. @@ -1130,20 +1162,19 @@ data is written. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataOperationMetadata.ExportDataOutputInfo) - ), + }, ), - DESCRIPTOR=_EXPORTDATAOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of ExportData operation. - + "DESCRIPTOR": _EXPORTDATAOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of ExportData operation. Attributes: output_info: - Output only. Information further describing this export data's + Output only. Information further describing this export data’s output. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ExportDataOperationMetadata) _sym_db.RegisterMessage(ExportDataOperationMetadata.ExportDataOutputInfo) @@ -1151,17 +1182,16 @@ BatchPredictOperationMetadata = _reflection.GeneratedProtocolMessageType( "BatchPredictOperationMetadata", (_message.Message,), - dict( - BatchPredictOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "BatchPredictOutputInfo": _reflection.GeneratedProtocolMessageType( "BatchPredictOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes this batch predict's output. Supplements - - [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig]. 
- + { + "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA_BATCHPREDICTOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes this batch predict’s output. Supplements [BatchPred + ictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig] + . Attributes: output_location: @@ -1175,12 +1205,11 @@ output is written. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOperationMetadata.BatchPredictOutputInfo) - ), + }, ), - DESCRIPTOR=_BATCHPREDICTOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of BatchPredict operation. - + "DESCRIPTOR": _BATCHPREDICTOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of BatchPredict operation. Attributes: input_config: @@ -1188,10 +1217,10 @@ this batch predict operation. output_info: Output only. Information further describing this batch - predict's output. + predict’s output. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.BatchPredictOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(BatchPredictOperationMetadata) _sym_db.RegisterMessage(BatchPredictOperationMetadata.BatchPredictOutputInfo) @@ -1199,17 +1228,15 @@ ExportModelOperationMetadata = _reflection.GeneratedProtocolMessageType( "ExportModelOperationMetadata", (_message.Message,), - dict( - ExportModelOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "ExportModelOutputInfo": _reflection.GeneratedProtocolMessageType( "ExportModelOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes the output of model export. Supplements - - [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. - + { + "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA_EXPORTMODELOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes the output of model export. Supplements [ModelExpor + tOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig]. Attributes: gcs_output_directory: @@ -1217,12 +1244,11 @@ into which the model will be exported. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelOperationMetadata.ExportModelOutputInfo) - ), + }, ), - DESCRIPTOR=_EXPORTMODELOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of ExportModel operation. - + "DESCRIPTOR": _EXPORTMODELOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of ExportModel operation. Attributes: output_info: @@ -1230,7 +1256,7 @@ model export. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ExportModelOperationMetadata) _sym_db.RegisterMessage(ExportModelOperationMetadata.ExportModelOutputInfo) @@ -1238,18 +1264,16 @@ ExportEvaluatedExamplesOperationMetadata = _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesOperationMetadata", (_message.Message,), - dict( - ExportEvaluatedExamplesOutputInfo=_reflection.GeneratedProtocolMessageType( + { + "ExportEvaluatedExamplesOutputInfo": _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesOutputInfo", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Further describes the output of the evaluated examples - export. Supplements - - [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig]. - + { + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA_EXPORTEVALUATEDEXAMPLESOUTPUTINFO, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Further describes the output of the evaluated examples export. + Supplements [ExportEvaluatedExamplesOutputConfig][google.cloud.automl + .v1beta1.ExportEvaluatedExamplesOutputConfig]. Attributes: bigquery_output_dataset: @@ -1258,12 +1282,11 @@ export evaluated examples is written. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo) - ), + }, ), - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, - __module__="google.cloud.automl_v1beta1.proto.operations_pb2", - __doc__="""Details of EvaluatedExamples operation. - + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESOPERATIONMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.operations_pb2", + "__doc__": """Details of EvaluatedExamples operation. Attributes: output_info: @@ -1271,7 +1294,7 @@ evaluated examples export. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesOperationMetadata) - ), + }, ) _sym_db.RegisterMessage(ExportEvaluatedExamplesOperationMetadata) _sym_db.RegisterMessage( diff --git a/google/cloud/automl_v1beta1/proto/prediction_service.proto b/google/cloud/automl_v1beta1/proto/prediction_service.proto index 57f1b794..0bcf685e 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service.proto +++ b/google/cloud/automl_v1beta1/proto/prediction_service.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -19,6 +18,8 @@ package google.cloud.automl.v1beta1; import "google/api/annotations.proto"; import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; import "google/cloud/automl/v1beta1/annotation_payload.proto"; import "google/cloud/automl/v1beta1/data_items.proto"; import "google/cloud/automl/v1beta1/io.proto"; @@ -38,8 +39,7 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // snake_case or kebab-case, either of those cases is accepted. 
service PredictionService { option (google.api.default_host) = "automl.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; // Perform an online prediction. The prediction result will be directly // returned in the response. @@ -65,16 +65,15 @@ service PredictionService { post: "/v1beta1/{name=projects/*/locations/*/models/*}:predict" body: "*" }; + option (google.api.method_signature) = "name,payload,params"; } - // Perform a batch prediction. Unlike the online - // [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch + // Perform a batch prediction. Unlike the online [Predict][google.cloud.automl.v1beta1.PredictionService.Predict], batch // prediction result won't be immediately available in the response. Instead, // a long running operation object is returned. User can poll the operation // result via [GetOperation][google.longrunning.Operations.GetOperation] - // method. Once the operation is done, - // [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is - // returned in the [response][google.longrunning.Operation.response] field. + // method. Once the operation is done, [BatchPredictResult][google.cloud.automl.v1beta1.BatchPredictResult] is returned in + // the [response][google.longrunning.Operation.response] field. // Available for following ML problems: // * Image Classification // * Image Object Detection @@ -86,18 +85,27 @@ service PredictionService { post: "/v1beta1/{name=projects/*/locations/*/models/*}:batchPredict" body: "*" }; + option (google.api.method_signature) = "name,input_config,output_config,params"; + option (google.longrunning.operation_info) = { + response_type: "BatchPredictResult" + metadata_type: "OperationMetadata" + }; } } -// Request message for -// [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. +// Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. message PredictRequest { - // Name of the model requested to serve the prediction. - string name = 1; + // Required. Name of the model requested to serve the prediction. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. Payload to perform a prediction on. The payload must match the // problem type that the model was trained to solve. - ExamplePayload payload = 2; + ExamplePayload payload = 2 [(google.api.field_behavior) = REQUIRED]; // Additional domain-specific parameters, any string must be up to 25000 // characters long. @@ -116,18 +124,13 @@ message PredictRequest { // boxes will be returned in the response. Default is 100, the // requested value may be limited by server. // * For Tables: - // `feature_importance` - (boolean) Whether - // - // [feature_importance][[google.cloud.automl.v1beta1.TablesModelColumnInfo.feature_importance] - // should be populated in the returned - // - // [TablesAnnotation(-s)][[google.cloud.automl.v1beta1.TablesAnnotation]. + // feature_importance - (boolean) Whether feature importance + // should be populated in the returned TablesAnnotation. // The default is false. map params = 3; } -// Response message for -// [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. +// Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. 
message PredictResponse { // Prediction result. // Translation and Text Sentiment will return precisely one payload. @@ -158,20 +161,24 @@ message PredictResponse { map metadata = 2; } -// Request message for -// [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. +// Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. message BatchPredictRequest { - // Name of the model requested to serve the batch prediction. - string name = 1; + // Required. Name of the model requested to serve the batch prediction. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; // Required. The input configuration for batch prediction. - BatchPredictInputConfig input_config = 3; + BatchPredictInputConfig input_config = 3 [(google.api.field_behavior) = REQUIRED]; // Required. The Configuration specifying where output predictions should // be written. - BatchPredictOutputConfig output_config = 4; + BatchPredictOutputConfig output_config = 4 [(google.api.field_behavior) = REQUIRED]; - // Additional domain-specific parameters for the predictions, any string must + // Required. Additional domain-specific parameters for the predictions, any string must // be up to 25000 characters long. // // * For Text Classification: @@ -196,6 +203,7 @@ message BatchPredictRequest { // requested value may be limited by server. // // * For Video Classification : + // // `score_threshold` - (float) A value from 0.0 to 1.0. When the model // makes predictions for a video, it will only produce results that // have at least this confidence score. The default is 0.5. @@ -223,7 +231,14 @@ message BatchPredictRequest { // metrics provided to describe that quality. The default is // "false". // + // * For Tables: + // + // feature_importance - (boolean) Whether feature importance + // should be populated in the returned TablesAnnotations. The + // default is false. + // // * For Video Object Tracking: + // // `score_threshold` - (float) When Model detects objects on video frames, // it will only produce bounding boxes which have at least this // confidence score. Value in 0 to 1 range, default is 0.5. @@ -233,14 +248,12 @@ message BatchPredictRequest { // `min_bounding_box_size` - (float) Only bounding boxes with shortest edge // at least that long as a relative value of video frame size will be // returned. Value in 0 to 1 range. Default is 0. - // - map params = 5; + map params = 5 [(google.api.field_behavior) = REQUIRED]; } // Result of the Batch Predict. This message is returned in // [response][google.longrunning.Operation.response] of the operation returned -// by the -// [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. +// by the [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. message BatchPredictResult { // Additional domain-specific prediction response metadata. // diff --git a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py index 751f16ef..b22759e8 100644 --- a/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py +++ b/google/cloud/automl_v1beta1/proto/prediction_service_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/prediction_service.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -17,6 +14,8 @@ from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( annotation_payload_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2, ) @@ -38,15 +37,14 @@ name="google/cloud/automl_v1beta1/proto/prediction_service.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\026PredictionServiceProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n:google/cloud/automl_v1beta1/proto/prediction_service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a#google/longrunning/operations.proto"\xd4\x01\n\x0ePredictRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12<\n\x07payload\x18\x02 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayload\x12G\n\x06params\x18\x03 \x03(\x0b\x32\x37.google.cloud.automl.v1beta1.PredictRequest.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x9a\x02\n\x0fPredictResponse\x12?\n\x07payload\x18\x01 \x03(\x0b\x32..google.cloud.automl.v1beta1.AnnotationPayload\x12G\n\x12preprocessed_input\x18\x03 \x01(\x0b\x32+.google.cloud.automl.v1beta1.ExamplePayload\x12L\n\x08metadata\x18\x02 \x03(\x0b\x32:.google.cloud.automl.v1beta1.PredictResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\xba\x02\n\x13\x42\x61tchPredictRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12J\n\x0cinput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.BatchPredictInputConfig\x12L\n\routput_config\x18\x04 \x01(\x0b\x32\x35.google.cloud.automl.v1beta1.BatchPredictOutputConfig\x12L\n\x06params\x18\x05 \x03(\x0b\x32<.google.cloud.automl.v1beta1.BatchPredictRequest.ParamsEntry\x1a-\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01"\x96\x01\n\x12\x42\x61tchPredictResult\x12O\n\x08metadata\x18\x01 \x03(\x0b\x32=.google.cloud.automl.v1beta1.BatchPredictResult.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x32\xb4\x03\n\x11PredictionService\x12\xa8\x01\n\x07Predict\x12+.google.cloud.automl.v1beta1.PredictRequest\x1a,.google.cloud.automl.v1beta1.PredictResponse"B\x82\xd3\xe4\x93\x02<"7/v1beta1/{name=projects/*/locations/*/models/*}:predict:\x01*\x12\xa8\x01\n\x0c\x42\x61tchPredict\x12\x30.google.cloud.automl.v1beta1.BatchPredictRequest\x1a\x1d.google.longrunning.Operation"G\x82\xd3\xe4\x93\x02\x41"\n\x0cinput_config\x18\x03 
\x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig"c\n\x11\x45xportDataRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12@\n\routput_config\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.OutputConfig"(\n\x18GetAnnotationSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"S\n\x13GetTableSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x8e\x01\n\x15ListTableSpecsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"n\n\x16ListTableSpecsResponse\x12;\n\x0btable_specs\x18\x01 \x03(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x85\x01\n\x16UpdateTableSpecRequest\x12:\n\ntable_spec\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"T\n\x14GetColumnSpecRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x8f\x01\n\x16ListColumnSpecsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"q\n\x17ListColumnSpecsResponse\x12=\n\x0c\x63olumn_specs\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x88\x01\n\x17UpdateColumnSpecRequest\x12<\n\x0b\x63olumn_spec\x18\x01 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"W\n\x12\x43reateModelRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x31\n\x05model\x18\x04 \x01(\x0b\x32".google.cloud.automl.v1beta1.Model"\x1f\n\x0fGetModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"Z\n\x11ListModelsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"`\n\x12ListModelsResponse\x12\x31\n\x05model\x18\x01 \x03(\x0b\x32".google.cloud.automl.v1beta1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t""\n\x12\x44\x65leteModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"\xca\x02\n\x12\x44\x65ployModelRequest\x12\x84\x01\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32H.google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12\x81\x01\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32G.google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadataH\x00\x12\x0c\n\x04name\x18\x01 \x01(\tB\x1b\n\x19model_deployment_metadata"$\n\x14UndeployModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"o\n\x12\x45xportModelRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12K\n\routput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.ModelExportOutputConfig"\x87\x01\n\x1e\x45xportEvaluatedExamplesRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\x12W\n\routput_config\x18\x03 \x01(\x0b\x32@.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig")\n\x19GetModelEvaluationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t"d\n\x1bListModelEvaluationsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"\x7f\n\x1cListModelEvaluationsResponse\x12\x46\n\x10model_evaluation\x18\x01 
\x03(\x0b\x32,.google.cloud.automl.v1beta1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xd9"\n\x06\x41utoMl\x12\xac\x01\n\rCreateDataset\x12\x31.google.cloud.automl.v1beta1.CreateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"B\x82\xd3\xe4\x93\x02<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\x12\x9d\x01\n\nGetDataset\x12..google.cloud.automl.v1beta1.GetDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{name=projects/*/locations/*/datasets/*}\x12\xae\x01\n\x0cListDatasets\x12\x30.google.cloud.automl.v1beta1.ListDatasetsRequest\x1a\x31.google.cloud.automl.v1beta1.ListDatasetsResponse"9\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{parent=projects/*/locations/*}/datasets\x12\xb4\x01\n\rUpdateDataset\x12\x31.google.cloud.automl.v1beta1.UpdateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"J\x82\xd3\xe4\x93\x02\x44\x32\x39/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\x12\x9c\x01\n\rDeleteDataset\x12\x31.google.cloud.automl.v1beta1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"9\x82\xd3\xe4\x93\x02\x33*1/v1beta1/{name=projects/*/locations/*/datasets/*}\x12\xa4\x01\n\nImportData\x12..google.cloud.automl.v1beta1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"G\x82\xd3\xe4\x93\x02\x41"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\x12\xc1\x01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"F\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\x12\xcd\x01\n\x0fUpdateTableSpec\x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"]\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\x12\xc1\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"T\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\x12\xd2\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"T\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\x12\xe0\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"m\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\x12\x9d\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operation">\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\x12\x95\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model"7\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\x12\xa6\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"7\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\x12\x96\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"7\x82\xd3\xe4\x93\x02\x31*//v1beta1/{name=projects/*/locations/*/models/*}\x12\xa0\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d
.google.longrunning.Operation"A\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\x01*\x12\xa6\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"C\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\x12\xa0\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"A\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\x12\xc9\x01\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"R\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\x12\xc6\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\x12\xd7\x01\n\x14ListModelEvaluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"J\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\013AutoMlProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n/google/cloud/automl_v1beta1/proto/service.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a:google/cloud/automl_v1beta1/proto/annotation_payload.proto\x1a\x37google/cloud/automl_v1beta1/proto/annotation_spec.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a/google/cloud/automl_v1beta1/proto/dataset.proto\x1a-google/cloud/automl_v1beta1/proto/image.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a-google/cloud/automl_v1beta1/proto/model.proto\x1a\x38google/cloud/automl_v1beta1/proto/model_evaluation.proto\x1a\x32google/cloud/automl_v1beta1/proto/operations.proto\x1a\x32google/cloud/automl_v1beta1/proto/table_spec.proto\x1a#google/longrunning/operations.proto\x1a google/protobuf/field_mask.proto"\x8d\x01\n\x14\x43reateDatasetRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12:\n\x07\x64\x61taset\x18\x02 \x01(\x0b\x32$.google.cloud.automl.v1beta1.DatasetB\x03\xe0\x41\x02"H\n\x11GetDatasetRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset"\x87\x01\n\x13ListDatasetsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"g\n\x14ListDatasetsResponse\x12\x36\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32$.google.cloud.automl.v1beta1.Dataset\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t"\x83\x01\n\x14UpdateDatasetRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32$.google.cloud.automl.v1beta1.DatasetB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"K\n\x14\x44\x65leteDatasetRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset"\x8d\x01\n\x11ImportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12\x43\n\x0cinput_config\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.InputConfigB\x03\xe0\x41\x02"\x8f\x01\n\x11\x45xportDataRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12\x45\n\routput_config\x18\x03 \x01(\x0b\x32).google.cloud.automl.v1beta1.OutputConfigB\x03\xe0\x41\x02"V\n\x18GetAnnotationSpecRequest\x12:\n\x04name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$automl.googleapis.com/AnnotationSpec"|\n\x13GetTableSpecRequest\x12\x35\n\x04name\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x61utoml.googleapis.com/TableSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb5\x01\n\x15ListTableSpecsRequest\x12\x35\n\x06parent\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1d\x61utoml.googleapis.com/Dataset\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"n\n\x16ListTableSpecsResponse\x12;\n\x0btable_specs\x18\x01 \x03(\x0b\x32&.google.cloud.automl.v1beta1.TableSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8a\x01\n\x16UpdateTableSpecRequest\x12?\n\ntable_spec\x18\x01 \x01(\x0b\x32&.google.cloud.automl.v1beta1.TableSpecB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"~\n\x14GetColumnSpecRequest\x12\x36\n\x04name\x18\x01 \x01(\tB(\xe0\x41\x02\xfa\x41"\n automl.googleapis.com/ColumnSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\xb8\x01\n\x16ListColumnSpecsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1f\x61utoml.googleapis.com/TableSpec\x12.\n\nfield_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"q\n\x17ListColumnSpecsResponse\x12=\n\x0c\x63olumn_specs\x18\x01 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8d\x01\n\x17UpdateColumnSpecRequest\x12\x41\n\x0b\x63olumn_spec\x18\x01 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpecB\x03\xe0\x41\x02\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask"\x87\x01\n\x12\x43reateModelRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x36\n\x05model\x18\x04 \x01(\x0b\x32".google.cloud.automl.v1beta1.ModelB\x03\xe0\x41\x02"D\n\x0fGetModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x85\x01\n\x11ListModelsRequest\x12\x39\n\x06parent\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!locations.googleapis.com/Location\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"`\n\x12ListModelsResponse\x12\x31\n\x05model\x18\x01 \x03(\x0b\x32".google.cloud.automl.v1beta1.Model\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"G\n\x12\x44\x65leteModelRequest\x12\x31\n\x04name\x18\x01 
\x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\xef\x02\n\x12\x44\x65ployModelRequest\x12\x84\x01\n0image_object_detection_model_deployment_metadata\x18\x02 \x01(\x0b\x32H.google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadataH\x00\x12\x81\x01\n.image_classification_model_deployment_metadata\x18\x04 \x01(\x0b\x32G.google.cloud.automl.v1beta1.ImageClassificationModelDeploymentMetadataH\x00\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/ModelB\x1b\n\x19model_deployment_metadata"I\n\x14UndeployModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model"\x99\x01\n\x12\x45xportModelRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12P\n\routput_config\x18\x03 \x01(\x0b\x32\x34.google.cloud.automl.v1beta1.ModelExportOutputConfigB\x03\xe0\x41\x02"\xb1\x01\n\x1e\x45xportEvaluatedExamplesRequest\x12\x31\n\x04name\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\\\n\routput_config\x18\x03 \x01(\x0b\x32@.google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfigB\x03\xe0\x41\x02"X\n\x19GetModelEvaluationRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%automl.googleapis.com/ModelEvaluation"\x89\x01\n\x1bListModelEvaluationsRequest\x12\x33\n\x06parent\x18\x01 \x01(\tB#\xe0\x41\x02\xfa\x41\x1d\n\x1b\x61utoml.googleapis.com/Model\x12\x0e\n\x06\x66ilter\x18\x03 \x01(\t\x12\x11\n\tpage_size\x18\x04 \x01(\x05\x12\x12\n\npage_token\x18\x06 \x01(\t"\x7f\n\x1cListModelEvaluationsResponse\x12\x46\n\x10model_evaluation\x18\x01 \x03(\x0b\x32,.google.cloud.automl.v1beta1.ModelEvaluation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xed\'\n\x06\x41utoMl\x12\xbd\x01\n\rCreateDataset\x12\x31.google.cloud.automl.v1beta1.CreateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"S\x82\xd3\xe4\x93\x02<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\x07\x64\x61taset\xda\x41\x0eparent,dataset\x12\xa4\x01\n\nGetDataset\x12..google.cloud.automl.v1beta1.GetDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"@\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\x12\xb7\x01\n\x0cListDatasets\x12\x30.google.cloud.automl.v1beta1.ListDatasetsRequest\x1a\x31.google.cloud.automl.v1beta1.ListDatasetsResponse"B\x82\xd3\xe4\x93\x02\x33\x12\x31/v1beta1/{parent=projects/*/locations/*}/datasets\xda\x41\x06parent\x12\xbe\x01\n\rUpdateDataset\x12\x31.google.cloud.automl.v1beta1.UpdateDatasetRequest\x1a$.google.cloud.automl.v1beta1.Dataset"T\x82\xd3\xe4\x93\x02\x44\x32\x39/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\x07\x64\x61taset\xda\x41\x07\x64\x61taset\x12\xd0\x01\n\rDeleteDataset\x12\x31.google.cloud.automl.v1beta1.DeleteDatasetRequest\x1a\x1d.google.longrunning.Operation"m\x82\xd3\xe4\x93\x02\x33*1/v1beta1/{name=projects/*/locations/*/datasets/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe6\x01\n\nImportData\x12..google.cloud.automl.v1beta1.ImportDataRequest\x1a\x1d.google.longrunning.Operation"\x88\x01\x82\xd3\xe4\x93\x02\x41"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\xda\x41\x04name\x12\xca\x01\n\x0eListTableSpecs\x12\x32.google.cloud.automl.v1beta1.ListTableSpecsRequest\x1a\x33.google.cloud.automl.v1beta1.ListTableSpecsResponse"O\x82\xd3\xe4\x93\x02@\x12>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\xda\x41\x06parent\x12\xda\x01\n\x0fUpdateTableSpec\
x12\x33.google.cloud.automl.v1beta1.UpdateTableSpecRequest\x1a&.google.cloud.automl.v1beta1.TableSpec"j\x82\xd3\xe4\x93\x02W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\xda\x41\ntable_spec\x12\xc8\x01\n\rGetColumnSpec\x12\x31.google.cloud.automl.v1beta1.GetColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"[\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\xda\x41\x04name\x12\xdb\x01\n\x0fListColumnSpecs\x12\x33.google.cloud.automl.v1beta1.ListColumnSpecsRequest\x1a\x34.google.cloud.automl.v1beta1.ListColumnSpecsResponse"]\x82\xd3\xe4\x93\x02N\x12L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\xda\x41\x06parent\x12\xee\x01\n\x10UpdateColumnSpec\x12\x34.google.cloud.automl.v1beta1.UpdateColumnSpecRequest\x1a\'.google.cloud.automl.v1beta1.ColumnSpec"{\x82\xd3\xe4\x93\x02g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\x0b\x63olumn_spec\xda\x41\x0b\x63olumn_spec\x12\xc9\x01\n\x0b\x43reateModel\x12/.google.cloud.automl.v1beta1.CreateModelRequest\x1a\x1d.google.longrunning.Operation"j\x82\xd3\xe4\x93\x02\x38"//v1beta1/{parent=projects/*/locations/*}/models:\x05model\xda\x41\x0cparent,model\xca\x41\x1a\n\x05Model\x12\x11OperationMetadata\x12\x9c\x01\n\x08GetModel\x12,.google.cloud.automl.v1beta1.GetModelRequest\x1a".google.cloud.automl.v1beta1.Model">\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\x12\xaf\x01\n\nListModels\x12..google.cloud.automl.v1beta1.ListModelsRequest\x1a/.google.cloud.automl.v1beta1.ListModelsResponse"@\x82\xd3\xe4\x93\x02\x31\x12//v1beta1/{parent=projects/*/locations/*}/models\xda\x41\x06parent\x12\xca\x01\n\x0b\x44\x65leteModel\x12/.google.cloud.automl.v1beta1.DeleteModelRequest\x1a\x1d.google.longrunning.Operation"k\x82\xd3\xe4\x93\x02\x31*//v1beta1/{name=projects/*/locations/*/models/*}\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xd4\x01\n\x0b\x44\x65ployModel\x12/.google.cloud.automl.v1beta1.DeployModelRequest\x1a\x1d.google.longrunning.Operation"u\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xda\x01\n\rUndeployModel\x12\x31.google.cloud.automl.v1beta1.UndeployModelRequest\x1a\x1d.google.longrunning.Operation"w\x82\xd3\xe4\x93\x02="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\x01*\xda\x41\x04name\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xe3\x01\n\x0b\x45xportModel\x12/.google.cloud.automl.v1beta1.ExportModelRequest\x1a\x1d.google.longrunning.Operation"\x83\x01\x82\xd3\xe4\x93\x02;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\x8c\x02\n\x17\x45xportEvaluatedExamples\x12;.google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest\x1a\x1d.google.longrunning.Operation"\x94\x01\x82\xd3\xe4\x93\x02L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\x01*\xda\x41\x12name,output_config\xca\x41*\n\x15google.protobuf.Empty\x12\x11OperationMetadata\x12\xcd\x01\n\x12GetModelEvaluation\x12\x36.google.cloud.automl.v1beta1.GetModelEvaluationRequest\x1a,.google.cloud.automl.v1beta1.ModelEvaluation"Q\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\xda\x41\x04name\x12\xe0\x01\n\x14ListModelEv
aluations\x12\x38.google.cloud.automl.v1beta1.ListModelEvaluationsRequest\x1a\x39.google.cloud.automl.v1beta1.ListModelEvaluationsResponse"S\x82\xd3\xe4\x93\x02\x44\x12\x42/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\xda\x41\x06parent\x1aI\xca\x41\x15\x61utoml.googleapis.com\xd2\x41.https://www.googleapis.com/auth/cloud-platformB\xb2\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x0b\x41utoMlProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, + google_dot_api_dot_client__pb2.DESCRIPTOR, + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__payload__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_annotation__spec__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR, @@ -77,7 +76,6 @@ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2.DESCRIPTOR, google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, - google_dot_api_dot_client__pb2.DESCRIPTOR, ], ) @@ -88,6 +86,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -98,14 +97,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="dataset", @@ -122,8 +122,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -134,8 +135,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=725, - serialized_end=818, + serialized_start=786, + serialized_end=927, ) @@ -145,6 +146,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -155,15 +157,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -173,8 +176,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=820, - serialized_end=853, + serialized_start=929, + serialized_end=1001, ) @@ -184,6 +187,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -194,14 +198,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - 
serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -212,7 +217,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -220,6 +225,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -238,6 +244,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -248,7 +255,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -256,6 +263,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -266,8 +274,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=855, - serialized_end=947, + serialized_start=1004, + serialized_end=1139, ) @@ -277,6 +285,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="datasets", @@ -295,6 +304,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -305,7 +315,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -313,6 +323,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -323,8 +334,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=949, - serialized_end=1052, + serialized_start=1141, + serialized_end=1244, ) @@ -334,6 +345,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="dataset", @@ -350,8 +362,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -370,6 +383,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -380,8 +394,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1054, - serialized_end=1180, + serialized_start=1247, + serialized_end=1378, ) @@ -391,6 +405,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -401,15 +416,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], 
nested_types=[], @@ -419,8 +435,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1182, - serialized_end=1218, + serialized_start=1380, + serialized_end=1455, ) @@ -430,6 +446,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -440,14 +457,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_config", @@ -464,8 +482,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -476,8 +495,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1220, - serialized_end=1317, + serialized_start=1458, + serialized_end=1599, ) @@ -487,6 +506,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -497,14 +517,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_config", @@ -521,8 +542,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -533,8 +555,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1319, - serialized_end=1418, + serialized_start=1602, + serialized_end=1745, ) @@ -544,6 +566,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -554,15 +577,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A&\n$automl.googleapis.com/AnnotationSpec", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -572,8 +596,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1420, - serialized_end=1460, + serialized_start=1747, + serialized_end=1833, ) @@ -583,6 +607,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -593,14 +618,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + 
serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -619,6 +645,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -629,8 +656,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1462, - serialized_end=1545, + serialized_start=1835, + serialized_end=1959, ) @@ -640,6 +667,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -650,14 +678,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\037\n\035automl.googleapis.com/Dataset", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -676,6 +705,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -686,7 +716,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -694,6 +724,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -712,6 +743,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -722,7 +754,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -730,6 +762,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -740,8 +773,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1548, - serialized_end=1690, + serialized_start=1962, + serialized_end=2143, ) @@ -751,6 +784,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_specs", @@ -769,6 +803,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -779,7 +814,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -787,6 +822,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -797,8 +833,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1692, - serialized_end=1802, + serialized_start=2145, + serialized_end=2255, ) @@ -808,6 +844,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="table_spec", @@ -824,8 +861,9 @@ containing_type=None, is_extension=False, 
extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -844,6 +882,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -854,8 +893,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1805, - serialized_end=1938, + serialized_start=2258, + serialized_end=2396, ) @@ -865,6 +904,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -875,14 +915,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b'\340A\002\372A"\n automl.googleapis.com/ColumnSpec', file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -901,6 +942,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -911,8 +953,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1940, - serialized_end=2024, + serialized_start=2398, + serialized_end=2524, ) @@ -922,6 +964,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -932,14 +975,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A!\n\037automl.googleapis.com/TableSpec", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="field_mask", @@ -958,6 +1002,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -968,7 +1013,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -976,6 +1021,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -994,6 +1040,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1004,7 +1051,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1012,6 +1059,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1022,8 +1070,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2027, - serialized_end=2170, + serialized_start=2527, + serialized_end=2711, ) @@ -1033,6 +1081,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( 
name="column_specs", @@ -1051,6 +1100,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1061,7 +1111,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1069,6 +1119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1079,8 +1130,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2172, - serialized_end=2285, + serialized_start=2713, + serialized_end=2826, ) @@ -1090,6 +1141,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec", @@ -1106,8 +1158,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="update_mask", @@ -1126,6 +1179,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1136,8 +1190,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2288, - serialized_end=2424, + serialized_start=2829, + serialized_end=2970, ) @@ -1147,6 +1201,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1157,14 +1212,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="model", @@ -1181,8 +1237,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1193,8 +1250,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2426, - serialized_end=2513, + serialized_start=2973, + serialized_end=3108, ) @@ -1204,6 +1261,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1214,15 +1272,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -1232,8 +1291,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2515, - serialized_end=2546, + serialized_start=3110, + serialized_end=3178, ) @@ -1243,6 +1302,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1253,14 +1313,15 @@ cpp_type=9, 
label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A#\n!locations.googleapis.com/Location", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -1271,7 +1332,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1279,6 +1340,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -1297,6 +1359,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1307,7 +1370,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1315,6 +1378,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1325,8 +1389,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2548, - serialized_end=2638, + serialized_start=3181, + serialized_end=3314, ) @@ -1336,6 +1400,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model", @@ -1354,6 +1419,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1364,7 +1430,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1372,6 +1438,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1382,8 +1449,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2640, - serialized_end=2736, + serialized_start=3316, + serialized_end=3412, ) @@ -1393,6 +1460,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1403,15 +1471,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -1421,8 +1490,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=2738, - serialized_end=2772, + serialized_start=3414, + serialized_end=3485, ) @@ -1432,6 +1501,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="image_object_detection_model_deployment_metadata", @@ -1450,6 +1520,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="image_classification_model_deployment_metadata", @@ -1468,6 +1539,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="name", @@ -1478,14 +1550,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1501,11 +1574,12 @@ full_name="google.cloud.automl.v1beta1.DeployModelRequest.model_deployment_metadata", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=2775, - serialized_end=3105, + serialized_start=3488, + serialized_end=3855, ) @@ -1515,6 +1589,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1525,15 +1600,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -1543,8 +1619,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3107, - serialized_end=3143, + serialized_start=3857, + serialized_end=3930, ) @@ -1554,6 +1630,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1564,14 +1641,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_config", @@ -1588,8 +1666,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1600,8 +1679,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3145, - serialized_end=3256, + serialized_start=3933, + serialized_end=4086, ) @@ -1611,6 +1690,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1621,14 +1701,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="output_config", @@ -1645,8 
+1726,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1657,8 +1739,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3259, - serialized_end=3394, + serialized_start=4089, + serialized_end=4266, ) @@ -1668,6 +1750,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -1678,15 +1761,16 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A'\n%automl.googleapis.com/ModelEvaluation", file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -1696,8 +1780,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3396, - serialized_end=3437, + serialized_start=4268, + serialized_end=4356, ) @@ -1707,6 +1791,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="parent", @@ -1717,14 +1802,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002\372A\035\n\033automl.googleapis.com/Model", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="filter", @@ -1735,7 +1821,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1743,6 +1829,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_size", @@ -1761,6 +1848,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="page_token", @@ -1771,7 +1859,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1779,6 +1867,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1789,8 +1878,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3439, - serialized_end=3539, + serialized_start=4359, + serialized_end=4496, ) @@ -1800,6 +1889,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="model_evaluation", @@ -1818,6 +1908,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="next_page_token", @@ -1828,7 +1919,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -1836,6 +1927,7 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -1846,8 +1938,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=3541, - serialized_end=3668, + serialized_start=4498, + serialized_end=4625, ) _CREATEDATASETREQUEST.fields_by_name[ @@ -2001,151 +2093,147 @@ CreateDatasetRequest = _reflection.GeneratedProtocolMessageType( "CreateDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. - + { + "DESCRIPTOR": _CREATEDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1 + .AutoMl.CreateDataset]. Attributes: parent: - The resource name of the project to create the dataset for. + Required. The resource name of the project to create the + dataset for. dataset: - The dataset to create. + Required. The dataset to create. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(CreateDatasetRequest) GetDatasetRequest = _reflection.GeneratedProtocolMessageType( "GetDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - Attributes: name: - The resource name of the dataset to retrieve. + Required. The resource name of the dataset to retrieve. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(GetDatasetRequest) ListDatasetsRequest = _reflection.GeneratedProtocolMessageType( "ListDatasetsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTDATASETSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - + { + "DESCRIPTOR": _LISTDATASETSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1. + AutoMl.ListDatasets]. Attributes: parent: - The resource name of the project from which to list datasets. + Required. The resource name of the project from which to list + datasets. filter: An expression for filtering the results of the request. - - ``dataset_metadata`` - for existence of the case (e.g. - image\_classification\_dataset\_metadata:\*). Some examples of - using the filter are: - - ``translation_dataset_metadata:*`` --> The dataset has - translation\_dataset\_metadata. + ``dataset_metadata`` - for existence of the case ( + e.g. ``image_classification_dataset_metadata``). Some examples + of using the filter are: - + ``translation_dataset_metadata:*`` –> The dataset has + translation_dataset_metadata. page_size: Requested page size. Server may return fewer results than requested. If unspecified, server will pick a default size. 
page_token: A token identifying a page of results for the server to return - Typically obtained via [ListDatasetsResponse.next\_page\_token - ][google.cloud.automl.v1beta1.ListDatasetsResponse.next\_page\ - _token] of the previous [AutoMl.ListDatasets][google.cloud.aut - oml.v1beta1.AutoMl.ListDatasets] call. + Typically obtained via [ListDatasetsResponse.next_page_token][ + google.cloud.automl.v1beta1.ListDatasetsResponse.next_page_tok + en] of the previous [AutoMl.ListDatasets][google.cloud.automl. + v1beta1.AutoMl.ListDatasets] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsRequest) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsRequest) ListDatasetsResponse = _reflection.GeneratedProtocolMessageType( "ListDatasetsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTDATASETSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for - [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - + { + "DESCRIPTOR": _LISTDATASETSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1 + .AutoMl.ListDatasets]. Attributes: datasets: The datasets read. next_page_token: A token to retrieve next page of results. Pass to [ListDataset - sRequest.page\_token][google.cloud.automl.v1beta1.ListDatasets - Request.page\_token] to obtain that page. + sRequest.page_token][google.cloud.automl.v1beta1.ListDatasetsR + equest.page_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListDatasetsResponse) - ), + }, ) _sym_db.RegisterMessage(ListDatasetsResponse) UpdateDatasetRequest = _reflection.GeneratedProtocolMessageType( "UpdateDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATEDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - + { + "DESCRIPTOR": _UPDATEDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1 + .AutoMl.UpdateDataset] Attributes: dataset: - The dataset which replaces the resource on the server. + Required. The dataset which replaces the resource on the + server. update_mask: The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateDatasetRequest) DeleteDatasetRequest = _reflection.GeneratedProtocolMessageType( "DeleteDatasetRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEDATASETREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - + { + "DESCRIPTOR": _DELETEDATASETREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1 + .AutoMl.DeleteDataset]. Attributes: name: - The resource name of the dataset to delete. + Required. The resource name of the dataset to delete. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteDatasetRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteDatasetRequest) ImportDataRequest = _reflection.GeneratedProtocolMessageType( "ImportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_IMPORTDATAREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _IMPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - Attributes: name: Required. Dataset name. Dataset must already exist. All @@ -2155,20 +2243,19 @@ semantics, if any. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ImportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ImportDataRequest) ExportDataRequest = _reflection.GeneratedProtocolMessageType( "ExportDataRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTDATAREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTDATAREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - Attributes: name: Required. The resource name of the dataset. @@ -2176,63 +2263,62 @@ Required. The desired output location. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportDataRequest) - ), + }, ) _sym_db.RegisterMessage(ExportDataRequest) GetAnnotationSpecRequest = _reflection.GeneratedProtocolMessageType( "GetAnnotationSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETANNOTATIONSPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - + { + "DESCRIPTOR": _GETANNOTATIONSPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1b + eta1.AutoMl.GetAnnotationSpec]. Attributes: name: - The resource name of the annotation spec to retrieve. + Required. The resource name of the annotation spec to + retrieve. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetAnnotationSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetAnnotationSpecRequest) GetTableSpecRequest = _reflection.GeneratedProtocolMessageType( "GetTableSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETTABLESPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - + { + "DESCRIPTOR": _GETTABLESPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1. + AutoMl.GetTableSpec]. Attributes: name: - The resource name of the table spec to retrieve. + Required. The resource name of the table spec to retrieve. field_mask: Mask specifying which fields to read. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetTableSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetTableSpecRequest) ListTableSpecsRequest = _reflection.GeneratedProtocolMessageType( "ListTableSpecsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESPECSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - + { + "DESCRIPTOR": _LISTTABLESPECSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta + 1.AutoMl.ListTableSpecs]. Attributes: parent: - The resource name of the dataset to list table specs from. + Required. The resource name of the dataset to list table specs + from. field_mask: Mask specifying which fields to read. filter: @@ -2244,94 +2330,91 @@ page_token: A token identifying a page of results for the server to return. Typically obtained from the [ListTableSpecsResponse.ne - xt\_page\_token][google.cloud.automl.v1beta1.ListTableSpecsRes - ponse.next\_page\_token] field of the previous [AutoMl.ListTab - leSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] - call. + xt_page_token][google.cloud.automl.v1beta1.ListTableSpecsRespo + nse.next_page_token] field of the previous [AutoMl.ListTableSp + ecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsRequest) - ), + }, ) _sym_db.RegisterMessage(ListTableSpecsRequest) ListTableSpecsResponse = _reflection.GeneratedProtocolMessageType( "ListTableSpecsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTTABLESPECSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for - [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - + { + "DESCRIPTOR": _LISTTABLESPECSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1bet + a1.AutoMl.ListTableSpecs]. Attributes: table_specs: The table specs read. next_page_token: A token to retrieve next page of results. Pass to [ListTableSp - ecsRequest.page\_token][google.cloud.automl.v1beta1.ListTableS - pecsRequest.page\_token] to obtain that page. + ecsRequest.page_token][google.cloud.automl.v1beta1.ListTableSp + ecsRequest.page_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListTableSpecsResponse) - ), + }, ) _sym_db.RegisterMessage(ListTableSpecsResponse) UpdateTableSpecRequest = _reflection.GeneratedProtocolMessageType( "UpdateTableSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATETABLESPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - + { + "DESCRIPTOR": _UPDATETABLESPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1bet + a1.AutoMl.UpdateTableSpec] Attributes: table_spec: - The table spec which replaces the resource on the server. + Required. The table spec which replaces the resource on the + server. update_mask: The update mask applies to the resource. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateTableSpecRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateTableSpecRequest) GetColumnSpecRequest = _reflection.GeneratedProtocolMessageType( "GetColumnSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETCOLUMNSPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - + { + "DESCRIPTOR": _GETCOLUMNSPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1 + .AutoMl.GetColumnSpec]. Attributes: name: - The resource name of the column spec to retrieve. + Required. The resource name of the column spec to retrieve. field_mask: Mask specifying which fields to read. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetColumnSpecRequest) - ), + }, ) _sym_db.RegisterMessage(GetColumnSpecRequest) ListColumnSpecsRequest = _reflection.GeneratedProtocolMessageType( "ListColumnSpecsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTCOLUMNSPECSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - + { + "DESCRIPTOR": _LISTCOLUMNSPECSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1bet + a1.AutoMl.ListColumnSpecs]. Attributes: parent: - The resource name of the table spec to list column specs from. + Required. The resource name of the table spec to list column + specs from. field_mask: Mask specifying which fields to read. filter: @@ -2343,188 +2426,182 @@ page_token: A token identifying a page of results for the server to return. Typically obtained from the [ListColumnSpecsResponse.n - ext\_page\_token][google.cloud.automl.v1beta1.ListColumnSpecsR - esponse.next\_page\_token] field of the previous [AutoMl.ListC - olumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs - ] call. + ext_page_token][google.cloud.automl.v1beta1.ListColumnSpecsRes + ponse.next_page_token] field of the previous [AutoMl.ListColum + nSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs] + call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsRequest) - ), + }, ) _sym_db.RegisterMessage(ListColumnSpecsRequest) ListColumnSpecsResponse = _reflection.GeneratedProtocolMessageType( "ListColumnSpecsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTCOLUMNSPECSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for - [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - + { + "DESCRIPTOR": _LISTCOLUMNSPECSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1be + ta1.AutoMl.ListColumnSpecs]. Attributes: column_specs: The column specs read. next_page_token: A token to retrieve next page of results. Pass to [ListColumnS - pecsRequest.page\_token][google.cloud.automl.v1beta1.ListColum - nSpecsRequest.page\_token] to obtain that page. + pecsRequest.page_token][google.cloud.automl.v1beta1.ListColumn + SpecsRequest.page_token] to obtain that page. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListColumnSpecsResponse) - ), + }, ) _sym_db.RegisterMessage(ListColumnSpecsResponse) UpdateColumnSpecRequest = _reflection.GeneratedProtocolMessageType( "UpdateColumnSpecRequest", (_message.Message,), - dict( - DESCRIPTOR=_UPDATECOLUMNSPECREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - + { + "DESCRIPTOR": _UPDATECOLUMNSPECREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1be + ta1.AutoMl.UpdateColumnSpec] Attributes: column_spec: - The column spec which replaces the resource on the server. + Required. The column spec which replaces the resource on the + server. update_mask: The update mask applies to the resource. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UpdateColumnSpecRequest) - ), + }, ) _sym_db.RegisterMessage(UpdateColumnSpecRequest) CreateModelRequest = _reflection.GeneratedProtocolMessageType( "CreateModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_CREATEMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _CREATEMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - Attributes: parent: - Resource name of the parent project where the model is being - created. + Required. Resource name of the parent project where the model + is being created. model: - The model to create. + Required. The model to create. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.CreateModelRequest) - ), + }, ) _sym_db.RegisterMessage(CreateModelRequest) GetModelRequest = _reflection.GeneratedProtocolMessageType( "GetModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _GETMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - Attributes: name: - Resource name of the model. + Required. Resource name of the model. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelRequest) ListModelsRequest = _reflection.GeneratedProtocolMessageType( "ListModelsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _LISTMODELSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - Attributes: parent: - Resource name of the project, from which to list the models. + Required. Resource name of the project, from which to list the + models. filter: An expression for filtering the results of the request. - - ``model_metadata`` - for existence of the case (e.g. - video\_classification\_model\_metadata:\*). - ``dataset_id`` + ``model_metadata`` - for existence of the case ( + e.g. ``video_classification_model_metadata:*``). - ``dataset_id`` - for = or !=. 
Some examples of using the filter are: - - ``image_classification_model_metadata:*`` --> The model has - image\_classification\_model\_metadata. - ``dataset_id=5`` - --> The model was created from a dataset with ID 5. + ``image_classification_model_metadata:*`` –> The model has + image_classification_model_metadata. - ``dataset_id=5`` –> + The model was created from a dataset with ID 5. page_size: Requested page size. page_token: A token identifying a page of results for the server to return - Typically obtained via [ListModelsResponse.next\_page\_token][ - google.cloud.automl.v1beta1.ListModelsResponse.next\_page\_tok - en] of the previous [AutoMl.ListModels][google.cloud.automl.v1 - beta1.AutoMl.ListModels] call. + Typically obtained via [ListModelsResponse.next_page_token][go + ogle.cloud.automl.v1beta1.ListModelsResponse.next_page_token] + of the previous [AutoMl.ListModels][google.cloud.automl.v1beta + 1.AutoMl.ListModels] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelsRequest) ListModelsResponse = _reflection.GeneratedProtocolMessageType( "ListModelsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for + { + "DESCRIPTOR": _LISTMODELSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - Attributes: model: List of models in the requested page. next_page_token: A token to retrieve next page of results. Pass to [ListModelsR - equest.page\_token][google.cloud.automl.v1beta1.ListModelsRequ - est.page\_token] to obtain that page. + equest.page_token][google.cloud.automl.v1beta1.ListModelsReque + st.page_token] to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelsResponse) DeleteModelRequest = _reflection.GeneratedProtocolMessageType( "DeleteModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DELETEMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DELETEMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - Attributes: name: - Resource name of the model being deleted. + Required. Resource name of the model being deleted. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeleteModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeleteModelRequest) DeployModelRequest = _reflection.GeneratedProtocolMessageType( "DeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_DEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _DEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - Attributes: model_deployment_metadata: The per-domain specific deployment parameters. @@ -2533,43 +2610,41 @@ image_classification_model_deployment_metadata: Model deployment metadata specific to Image Classification. name: - Resource name of the model to deploy. + Required. Resource name of the model to deploy. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.DeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(DeployModelRequest) UndeployModelRequest = _reflection.GeneratedProtocolMessageType( "UndeployModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_UNDEPLOYMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - + { + "DESCRIPTOR": _UNDEPLOYMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1 + .AutoMl.UndeployModel]. Attributes: name: - Resource name of the model to undeploy. + Required. Resource name of the model to undeploy. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.UndeployModelRequest) - ), + }, ) _sym_db.RegisterMessage(UndeployModelRequest) ExportModelRequest = _reflection.GeneratedProtocolMessageType( "ExportModelRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTMODELREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for + { + "DESCRIPTOR": _EXPORTMODELREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. - Models need to be enabled for exporting, otherwise an error code will be - returned. - + Models need to be enabled for exporting, otherwise an error code will + be returned. Attributes: name: @@ -2578,19 +2653,18 @@ Required. The desired output location and configuration. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportModelRequest) - ), + }, ) _sym_db.RegisterMessage(ExportModelRequest) ExportEvaluatedExamplesRequest = _reflection.GeneratedProtocolMessageType( "ExportEvaluatedExamplesRequest", (_message.Message,), - dict( - DESCRIPTOR=_EXPORTEVALUATEDEXAMPLESREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - + { + "DESCRIPTOR": _EXPORTEVALUATEDEXAMPLESREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.auto + ml.v1beta1.AutoMl.ExportEvaluatedExamples]. Attributes: name: @@ -2600,105 +2674,132 @@ Required. The desired output location and configuration. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ExportEvaluatedExamplesRequest) - ), + }, ) _sym_db.RegisterMessage(ExportEvaluatedExamplesRequest) GetModelEvaluationRequest = _reflection.GeneratedProtocolMessageType( "GetModelEvaluationRequest", (_message.Message,), - dict( - DESCRIPTOR=_GETMODELEVALUATIONREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - + { + "DESCRIPTOR": _GETMODELEVALUATIONREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1 + beta1.AutoMl.GetModelEvaluation]. Attributes: name: - Resource name for the model evaluation. + Required. Resource name for the model evaluation. 
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.GetModelEvaluationRequest) - ), + }, ) _sym_db.RegisterMessage(GetModelEvaluationRequest) ListModelEvaluationsRequest = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsRequest", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSREQUEST, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Request message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - + { + "DESCRIPTOR": _LISTMODELEVALUATIONSREQUEST, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Request message for [AutoMl.ListModelEvaluations][google.cloud.automl. + v1beta1.AutoMl.ListModelEvaluations]. Attributes: parent: - Resource name of the model to list the model evaluations for. - If modelId is set as "-", this will list model evaluations - from across all models of the parent location. + Required. Resource name of the model to list the model + evaluations for. If modelId is set as “-”, this will list + model evaluations from across all models of the parent + location. filter: An expression for filtering the results of the request. - ``annotation_spec_id`` - for =, != or existence. See example below for the last. Some examples of using the filter are: - - ``annotation_spec_id!=4`` --> The model evaluation was done + - ``annotation_spec_id!=4`` –> The model evaluation was done for annotation spec with ID different than 4. - ``NOT - annotation_spec_id:*`` --> The model evaluation was done for + annotation_spec_id:*`` –> The model evaluation was done for aggregate of all annotation specs. page_size: Requested page size. page_token: A token identifying a page of results for the server to return. Typically obtained via [ListModelEvaluationsResponse.n - ext\_page\_token][google.cloud.automl.v1beta1.ListModelEvaluat - ionsResponse.next\_page\_token] of the previous [AutoMl.ListMo - delEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEv - aluations] call. + ext_page_token][google.cloud.automl.v1beta1.ListModelEvaluatio + nsResponse.next_page_token] of the previous [AutoMl.ListModelE + valuations][google.cloud.automl.v1beta1.AutoMl.ListModelEvalua + tions] call. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsRequest) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsRequest) ListModelEvaluationsResponse = _reflection.GeneratedProtocolMessageType( "ListModelEvaluationsResponse", (_message.Message,), - dict( - DESCRIPTOR=_LISTMODELEVALUATIONSRESPONSE, - __module__="google.cloud.automl_v1beta1.proto.service_pb2", - __doc__="""Response message for - [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - + { + "DESCRIPTOR": _LISTMODELEVALUATIONSRESPONSE, + "__module__": "google.cloud.automl_v1beta1.proto.service_pb2", + "__doc__": """Response message for [AutoMl.ListModelEvaluations][google.cloud.automl + .v1beta1.AutoMl.ListModelEvaluations]. Attributes: model_evaluation: List of model evaluations in the requested page. next_page_token: A token to retrieve next page of results. Pass to the [ListMod - elEvaluationsRequest.page\_token][google.cloud.automl.v1beta1. - ListModelEvaluationsRequest.page\_token] field of a new [AutoM - l.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.Lis - tModelEvaluations] request to obtain that page. 
+ elEvaluationsRequest.page_token][google.cloud.automl.v1beta1.L + istModelEvaluationsRequest.page_token] field of a new [AutoMl. + ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListM + odelEvaluations] request to obtain that page. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.ListModelEvaluationsResponse) - ), + }, ) _sym_db.RegisterMessage(ListModelEvaluationsResponse) DESCRIPTOR._options = None +_CREATEDATASETREQUEST.fields_by_name["parent"]._options = None +_CREATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_GETDATASETREQUEST.fields_by_name["name"]._options = None +_LISTDATASETSREQUEST.fields_by_name["parent"]._options = None +_UPDATEDATASETREQUEST.fields_by_name["dataset"]._options = None +_DELETEDATASETREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["name"]._options = None +_IMPORTDATAREQUEST.fields_by_name["input_config"]._options = None +_EXPORTDATAREQUEST.fields_by_name["name"]._options = None +_EXPORTDATAREQUEST.fields_by_name["output_config"]._options = None +_GETANNOTATIONSPECREQUEST.fields_by_name["name"]._options = None +_GETTABLESPECREQUEST.fields_by_name["name"]._options = None +_LISTTABLESPECSREQUEST.fields_by_name["parent"]._options = None +_UPDATETABLESPECREQUEST.fields_by_name["table_spec"]._options = None +_GETCOLUMNSPECREQUEST.fields_by_name["name"]._options = None +_LISTCOLUMNSPECSREQUEST.fields_by_name["parent"]._options = None +_UPDATECOLUMNSPECREQUEST.fields_by_name["column_spec"]._options = None +_CREATEMODELREQUEST.fields_by_name["parent"]._options = None +_CREATEMODELREQUEST.fields_by_name["model"]._options = None +_GETMODELREQUEST.fields_by_name["name"]._options = None +_LISTMODELSREQUEST.fields_by_name["parent"]._options = None +_DELETEMODELREQUEST.fields_by_name["name"]._options = None +_DEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_UNDEPLOYMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["name"]._options = None +_EXPORTMODELREQUEST.fields_by_name["output_config"]._options = None +_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name["name"]._options = None +_EXPORTEVALUATEDEXAMPLESREQUEST.fields_by_name["output_config"]._options = None +_GETMODELEVALUATIONREQUEST.fields_by_name["name"]._options = None +_LISTMODELEVALUATIONSREQUEST.fields_by_name["parent"]._options = None _AUTOML = _descriptor.ServiceDescriptor( name="AutoMl", full_name="google.cloud.automl.v1beta1.AutoMl", file=DESCRIPTOR, index=0, - serialized_options=_b( - "\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform" - ), - serialized_start=3671, - serialized_end=8112, + serialized_options=b"\312A\025automl.googleapis.com\322A.https://www.googleapis.com/auth/cloud-platform", + create_key=_descriptor._internal_create_key, + serialized_start=4628, + serialized_end=9729, methods=[ _descriptor.MethodDescriptor( name="CreateDataset", @@ -2707,9 +2808,8 @@ containing_service=None, input_type=_CREATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - '\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset' - ), + serialized_options=b'\202\323\344\223\002<"1/v1beta1/{parent=projects/*/locations/*}/datasets:\007dataset\332A\016parent,dataset', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetDataset", @@ -2718,9 +2818,8 @@ containing_service=None, input_type=_GETDATASETREQUEST, 
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListDatasets", @@ -2729,9 +2828,8 @@ containing_service=None, input_type=_LISTDATASETSREQUEST, output_type=_LISTDATASETSRESPONSE, - serialized_options=_b( - "\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets" - ), + serialized_options=b"\202\323\344\223\0023\0221/v1beta1/{parent=projects/*/locations/*}/datasets\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateDataset", @@ -2740,9 +2838,8 @@ containing_service=None, input_type=_UPDATEDATASETREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_dataset__pb2._DATASET, - serialized_options=_b( - "\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset" - ), + serialized_options=b"\202\323\344\223\002D29/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}:\007dataset\332A\007dataset", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteDataset", @@ -2751,9 +2848,8 @@ containing_service=None, input_type=_DELETEDATASETREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}" - ), + serialized_options=b"\202\323\344\223\0023*1/v1beta1/{name=projects/*/locations/*/datasets/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ImportData", @@ -2762,9 +2858,8 @@ containing_service=None, input_type=_IMPORTDATAREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002A"/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}" - ), + serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListTableSpecs", @@ -2806,9 +2898,8 @@ containing_service=None, input_type=_LISTTABLESPECSREQUEST, output_type=_LISTTABLESPECSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs" - ), + serialized_options=b"\202\323\344\223\002@\022>/v1beta1/{parent=projects/*/locations/*/datasets/*}/tableSpecs\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateTableSpec", @@ -2817,9 +2908,8 @@ containing_service=None, input_type=_UPDATETABLESPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_table__spec__pb2._TABLESPEC, - serialized_options=_b( - "\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec" - ), + serialized_options=b"\202\323\344\223\002W2I/v1beta1/{table_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*}:\ntable_spec\332A\ntable_spec", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetColumnSpec", @@ -2828,9 +2918,8 @@ containing_service=None, input_type=_GETCOLUMNSPECREQUEST, 
output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, - serialized_options=_b( - "\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}" - ), + serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListColumnSpecs", @@ -2839,9 +2928,8 @@ containing_service=None, input_type=_LISTCOLUMNSPECSREQUEST, output_type=_LISTCOLUMNSPECSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs" - ), + serialized_options=b"\202\323\344\223\002N\022L/v1beta1/{parent=projects/*/locations/*/datasets/*/tableSpecs/*}/columnSpecs\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UpdateColumnSpec", @@ -2850,9 +2938,8 @@ containing_service=None, input_type=_UPDATECOLUMNSPECREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2._COLUMNSPEC, - serialized_options=_b( - "\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec" - ), + serialized_options=b"\202\323\344\223\002g2X/v1beta1/{column_spec.name=projects/*/locations/*/datasets/*/tableSpecs/*/columnSpecs/*}:\013column_spec\332A\013column_spec", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="CreateModel", @@ -2861,9 +2948,8 @@ containing_service=None, input_type=_CREATEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model' - ), + serialized_options=b'\202\323\344\223\0028"//v1beta1/{parent=projects/*/locations/*}/models:\005model\332A\014parent,model\312A\032\n\005Model\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModel", @@ -2872,9 +2958,8 @@ containing_service=None, input_type=_GETMODELREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__pb2._MODEL, - serialized_options=_b( - "\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\0021\022//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModels", @@ -2883,9 +2968,8 @@ containing_service=None, input_type=_LISTMODELSREQUEST, output_type=_LISTMODELSRESPONSE, - serialized_options=_b( - "\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models" - ), + serialized_options=b"\202\323\344\223\0021\022//v1beta1/{parent=projects/*/locations/*}/models\332A\006parent", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeleteModel", @@ -2894,9 +2978,8 @@ containing_service=None, input_type=_DELETEMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - "\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}" - ), + serialized_options=b"\202\323\344\223\0021*//v1beta1/{name=projects/*/locations/*/models/*}\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="DeployModel", 
@@ -2905,9 +2988,8 @@ containing_service=None, input_type=_DEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*' - ), + serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:deploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="UndeployModel", @@ -2916,9 +2998,8 @@ containing_service=None, input_type=_UNDEPLOYMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*' - ), + serialized_options=b'\202\323\344\223\002="8/v1beta1/{name=projects/*/locations/*/models/*}:undeploy:\001*\332A\004name\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportModel", @@ -2927,9 +3008,8 @@ containing_service=None, input_type=_EXPORTMODELREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*' - ), + serialized_options=b'\202\323\344\223\002;"6/v1beta1/{name=projects/*/locations/*/models/*}:export:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ExportEvaluatedExamples", @@ -2938,9 +3018,8 @@ containing_service=None, input_type=_EXPORTEVALUATEDEXAMPLESREQUEST, output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, - serialized_options=_b( - '\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*' - ), + serialized_options=b'\202\323\344\223\002L"G/v1beta1/{name=projects/*/locations/*/models/*}:exportEvaluatedExamples:\001*\332A\022name,output_config\312A*\n\025google.protobuf.Empty\022\021OperationMetadata', + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="GetModelEvaluation", @@ -2949,9 +3028,8 @@ containing_service=None, input_type=_GETMODELEVALUATIONREQUEST, output_type=google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_model__evaluation__pb2._MODELEVALUATION, - serialized_options=_b( - "\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}" - ), + serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{name=projects/*/locations/*/models/*/modelEvaluations/*}\332A\004name", + create_key=_descriptor._internal_create_key, ), _descriptor.MethodDescriptor( name="ListModelEvaluations", @@ -2960,9 +3038,8 @@ containing_service=None, input_type=_LISTMODELEVALUATIONSREQUEST, output_type=_LISTMODELEVALUATIONSRESPONSE, - serialized_options=_b( - "\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations" - ), + serialized_options=b"\202\323\344\223\002D\022B/v1beta1/{parent=projects/*/locations/*/models/*}/modelEvaluations\332A\006parent", + create_key=_descriptor._internal_create_key, ), ], ) diff --git a/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py b/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py index eb049c20..efb69009 100644 --- a/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py +++ b/google/cloud/automl_v1beta1/proto/service_pb2_grpc.py 
@@ -345,7 +345,8 @@ def DeployModel(self, request, context):
[node_number][google.cloud.automl.v1beta1.ImageObjectDetectionModelDeploymentMetadata.node_number])
will reset the deployment state without pausing the model's availability.
- Only applicable for Text Classification, Image Object Detection and Tables; all other domains manage deployment automatically.
+ Only applicable for Text Classification, Image Object Detection, Tables, and Image Segmentation; all other domains manage
+ deployment automatically.
Returns an empty response in the
[response][google.longrunning.Operation.response] field when it completes.
diff --git a/google/cloud/automl_v1beta1/proto/table_spec.proto b/google/cloud/automl_v1beta1/proto/table_spec.proto
index 4475617a..bc3fc744 100644
--- a/google/cloud/automl_v1beta1/proto/table_spec.proto
+++ b/google/cloud/automl_v1beta1/proto/table_spec.proto
@@ -1,4 +1,4 @@
-// Copyright 2019 Google LLC.
+// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -11,12 +11,12 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-//
syntax = "proto3";
package google.cloud.automl.v1beta1;
+import "google/api/resource.proto";
import "google/cloud/automl/v1beta1/io.proto";
import "google/api/annotations.proto";
@@ -36,6 +36,11 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1";
// Used by:
// * Tables
message TableSpec {
+ option (google.api.resource) = {
+ type: "automl.googleapis.com/TableSpec"
+ pattern: "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}"
+ };
+
// Output only. The resource name of the table spec.
// Form:
//
diff --git a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py
index 48aa9178..a6934cea 100644
--- a/google/cloud/automl_v1beta1/proto/table_spec_pb2.py
+++ b/google/cloud/automl_v1beta1/proto/table_spec_pb2.py
@@ -2,9 +2,6 @@
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/automl_v1beta1/proto/table_spec.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 from google.cloud.automl_v1beta1.proto import ( io_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2, ) @@ -25,13 +23,11 @@ name="google/cloud/automl_v1beta1/proto/table_spec.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n2google/cloud/automl_v1beta1/proto/table_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1cgoogle/api/annotations.proto"\xc7\x01\n\tTableSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x13time_column_spec_id\x18\x02 \x01(\t\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x17\n\x0fvalid_row_count\x18\x04 \x01(\x03\x12\x14\n\x0c\x63olumn_count\x18\x07 \x01(\x03\x12?\n\rinput_configs\x18\x05 \x03(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig\x12\x0c\n\x04\x65tag\x18\x06 \x01(\tB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n2google/cloud/automl_v1beta1/proto/table_spec.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x19google/api/resource.proto\x1a*google/cloud/automl_v1beta1/proto/io.proto\x1a\x1cgoogle/api/annotations.proto"\xc1\x02\n\tTableSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1b\n\x13time_column_spec_id\x18\x02 \x01(\t\x12\x11\n\trow_count\x18\x03 \x01(\x03\x12\x17\n\x0fvalid_row_count\x18\x04 \x01(\x03\x12\x14\n\x0c\x63olumn_count\x18\x07 \x01(\x03\x12?\n\rinput_configs\x18\x05 \x03(\x0b\x32(.google.cloud.automl.v1beta1.InputConfig\x12\x0c\n\x04\x65tag\x18\x06 \x01(\t:x\xea\x41u\n\x1f\x61utoml.googleapis.com/TableSpec\x12Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}B\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_resource__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_io__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], @@ -44,6 +40,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="name", @@ -54,7 +51,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -62,6 +59,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + 
create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="time_column_spec_id", @@ -72,7 +70,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -80,6 +78,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="row_count", @@ -98,6 +97,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="valid_row_count", @@ -116,6 +116,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_count", @@ -134,6 +135,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_configs", @@ -152,6 +154,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="etag", @@ -162,7 +165,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -170,18 +173,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=None, + serialized_options=b"\352Au\n\037automl.googleapis.com/TableSpec\022Rprojects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=158, - serialized_end=357, + serialized_start=185, + serialized_end=506, ) _TABLESPEC.fields_by_name[ @@ -193,37 +197,36 @@ TableSpec = _reflection.GeneratedProtocolMessageType( "TableSpec", (_message.Message,), - dict( - DESCRIPTOR=_TABLESPEC, - __module__="google.cloud.automl_v1beta1.proto.table_spec_pb2", - __doc__="""A specification of a relational table. The table's schema - is represented via its child column specs. It is pre-populated as part - of ImportData by schema inference algorithm, the version of which is a - required parameter of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the data in the - table (e.g. string in a FLOAT64 column). The consistency validation is + { + "DESCRIPTOR": _TABLESPEC, + "__module__": "google.cloud.automl_v1beta1.proto.table_spec_pb2", + "__doc__": """A specification of a relational table. The table’s schema is + represented via its child column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version of which is a + required parameter of ImportData InputConfig. Note: While working with + a table, at times the schema may be inconsistent with the data in the + table (e.g. string in a FLOAT64 column). The consistency validation is done upon creation of a model. Used by: \* Tables - Attributes: name: Output only. The resource name of the table spec. Form: ``pro jects/{project_id}/locations/{location_id}/datasets/{dataset_i d}/tableSpecs/{table_spec_id}`` time_column_spec_id: - column\_spec\_id of the time column. Only used if the parent - dataset's ml\_use\_column\_spec\_id is not set. 
Used to split - rows into TRAIN, VALIDATE and TEST sets such that oldest rows - go to TRAIN set, newest to TEST, and those in between to - VALIDATE. Required type: TIMESTAMP. If both this column and - ml\_use\_column are not set, then ML use of all rows will be + column_spec_id of the time column. Only used if the parent + dataset’s ml_use_column_spec_id is not set. Used to split rows + into TRAIN, VALIDATE and TEST sets such that oldest rows go to + TRAIN set, newest to TEST, and those in between to VALIDATE. + Required type: TIMESTAMP. If both this column and + ml_use_column are not set, then ML use of all rows will be assigned by AutoML. NOTE: Updates of this field will instantly affect any other users concurrently working with the dataset. row_count: - Output only. The number of rows (i.e. examples) in the table. + Output only. The number of rows (i.e. examples) in the table. valid_row_count: - Output only. The number of valid rows (i.e. without values - that don't match DataType-s of their columns). + Output only. The number of valid rows (i.e. without values + that don’t match DataType-s of their columns). column_count: Output only. The number of columns of the table. That is, the number of child ColumnSpec-s. @@ -232,13 +235,14 @@ in the table had been imported. etag: Used to perform consistent read-modify-write updates. If not - set, a blind "overwrite" update happens. + set, a blind “overwrite” update happens. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TableSpec) - ), + }, ) _sym_db.RegisterMessage(TableSpec) DESCRIPTOR._options = None +_TABLESPEC._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/tables.proto b/google/cloud/automl_v1beta1/proto/tables.proto index 5b786c9f..5327f5e7 100644 --- a/google/cloud/automl_v1beta1/proto/tables.proto +++ b/google/cloud/automl_v1beta1/proto/tables.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,21 +11,21 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/classification.proto"; import "google/cloud/automl/v1beta1/column_spec.proto"; import "google/cloud/automl/v1beta1/data_items.proto"; import "google/cloud/automl/v1beta1/data_stats.proto"; import "google/cloud/automl/v1beta1/ranges.proto"; +import "google/cloud/automl/v1beta1/regression.proto"; import "google/cloud/automl/v1beta1/temporal.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -249,6 +249,15 @@ message TablesAnnotation { // [column_display_name][google.cloud.automl.v1beta1.TablesModelColumnInfo.column_display_name] // would be populated, then this whole field is not. repeated TablesModelColumnInfo tables_model_column_info = 3; + + // Output only. Stores the prediction score for the baseline example, which + // is defined as the example with all values set to their baseline values. 
+ // This is used as part of the Sampled Shapley explanation of the model's + // prediction. This field is populated only when feature importance is + // requested. For regression models, this holds the baseline prediction for + // the baseline example. For classification models, this holds the baseline + // prediction for the baseline example for the argmax class. + float baseline_score = 5; } // An information specific to given column and Tables Model, in context diff --git a/google/cloud/automl_v1beta1/proto/tables_pb2.py b/google/cloud/automl_v1beta1/proto/tables_pb2.py index 4659aa8d..7335fe3d 100644 --- a/google/cloud/automl_v1beta1/proto/tables_pb2.py +++ b/google/cloud/automl_v1beta1/proto/tables_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/tables.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,7 +12,6 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) @@ -31,33 +27,35 @@ from google.cloud.automl_v1beta1.proto import ( ranges_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2, ) +from google.cloud.automl_v1beta1.proto import ( + regression_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2, +) from google.cloud.automl_v1beta1.proto import ( temporal_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2, ) from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/tables.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 
\x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xe5\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n.google/cloud/automl_v1beta1/proto/tables.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x33google/cloud/automl_v1beta1/proto/column_spec.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_stats.proto\x1a.google/cloud/automl_v1beta1/proto/ranges.proto\x1a\x32google/cloud/automl_v1beta1/proto/regression.proto\x1a\x30google/cloud/automl_v1beta1/proto/temporal.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\xb0\x03\n\x15TablesDatasetMetadata\x12\x1d\n\x15primary_table_spec_id\x18\x01 \x01(\t\x12\x1d\n\x15target_column_spec_id\x18\x02 \x01(\t\x12\x1d\n\x15weight_column_spec_id\x18\x03 \x01(\t\x12\x1d\n\x15ml_use_column_spec_id\x18\x04 \x01(\t\x12t\n\x1atarget_column_correlations\x18\x06 \x03(\x0b\x32P.google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry\x12\x35\n\x11stats_update_time\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1an\n\x1dTargetColumnCorrelationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.google.cloud.automl.v1beta1.CorrelationStats:\x02\x38\x01"\x96\x04\n\x13TablesModelMetadata\x12-\n#optimization_objective_recall_value\x18\x11 \x01(\x02H\x00\x12\x30\n&optimization_objective_precision_value\x18\x12 \x01(\x02H\x00\x12\x43\n\x12target_column_spec\x18\x02 \x01(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12K\n\x1ainput_feature_column_specs\x18\x03 \x03(\x0b\x32\'.google.cloud.automl.v1beta1.ColumnSpec\x12\x1e\n\x16optimization_objective\x18\x04 \x01(\t\x12T\n\x18tables_model_column_info\x18\x05 
\x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12%\n\x1dtrain_budget_milli_node_hours\x18\x06 \x01(\x03\x12#\n\x1btrain_cost_milli_node_hours\x18\x07 \x01(\x03\x12\x1e\n\x16\x64isable_early_stopping\x18\x0c \x01(\x08\x42*\n(additional_optimization_objective_config"\xfd\x01\n\x10TablesAnnotation\x12\r\n\x05score\x18\x01 \x01(\x02\x12\x45\n\x13prediction_interval\x18\x04 \x01(\x0b\x32(.google.cloud.automl.v1beta1.DoubleRange\x12%\n\x05value\x18\x02 \x01(\x0b\x32\x16.google.protobuf.Value\x12T\n\x18tables_model_column_info\x18\x03 \x03(\x0b\x32\x32.google.cloud.automl.v1beta1.TablesModelColumnInfo\x12\x16\n\x0e\x62\x61seline_score\x18\x05 \x01(\x02"j\n\x15TablesModelColumnInfo\x12\x18\n\x10\x63olumn_spec_name\x18\x01 \x01(\t\x12\x1b\n\x13\x63olumn_display_name\x18\x02 \x01(\t\x12\x1a\n\x12\x66\x65\x61ture_importance\x18\x03 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_column__spec__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__stats__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_ranges__pb2.DESCRIPTOR, + google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_regression__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_temporal__pb2.DESCRIPTOR, google_dot_protobuf_dot_struct__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -68,6 +66,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="key", @@ -78,7 +77,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -86,6 +85,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -104,18 +104,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], nested_types=[], enum_types=[], - serialized_options=_b("8\001"), + serialized_options=b"8\001", is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=806, - serialized_end=916, + serialized_start=858, + serialized_end=968, ) _TABLESDATASETMETADATA = _descriptor.Descriptor( @@ -124,6 +125,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="primary_table_spec_id", @@ -134,7 +136,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -142,6 +144,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_column_spec_id", @@ -152,7 +155,7 @@ cpp_type=9, label=1, has_default_value=False, - 
default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -160,6 +163,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="weight_column_spec_id", @@ -170,7 +174,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -178,6 +182,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="ml_use_column_spec_id", @@ -188,7 +193,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -196,6 +201,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_column_correlations", @@ -214,6 +220,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="stats_update_time", @@ -232,18 +239,19 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY], + nested_types=[_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=484, - serialized_end=916, + serialized_start=536, + serialized_end=968, ) @@ -253,6 +261,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="optimization_objective_recall_value", @@ -271,6 +280,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="optimization_objective_precision_value", @@ -289,6 +299,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_column_spec", @@ -307,6 +318,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="input_feature_column_specs", @@ -325,6 +337,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="optimization_objective", @@ -335,7 +348,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -343,6 +356,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_model_column_info", @@ -361,6 +375,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="train_budget_milli_node_hours", @@ -379,6 +394,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), 
_descriptor.FieldDescriptor( name="train_cost_milli_node_hours", @@ -397,6 +413,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="disable_early_stopping", @@ -415,6 +432,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -430,11 +448,12 @@ full_name="google.cloud.automl.v1beta1.TablesModelMetadata.additional_optimization_objective_config", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], - serialized_start=919, - serialized_end=1453, + serialized_start=971, + serialized_end=1505, ) @@ -444,6 +463,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="score", @@ -462,6 +482,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="prediction_interval", @@ -480,6 +501,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="value", @@ -498,6 +520,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="tables_model_column_info", @@ -516,6 +539,26 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + ), + _descriptor.FieldDescriptor( + name="baseline_score", + full_name="google.cloud.automl.v1beta1.TablesAnnotation.baseline_score", + index=4, + number=5, + type=2, + cpp_type=6, + label=1, + has_default_value=False, + default_value=float(0), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -526,8 +569,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1456, - serialized_end=1685, + serialized_start=1508, + serialized_end=1761, ) @@ -537,6 +580,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="column_spec_name", @@ -547,7 +591,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -555,6 +599,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="column_display_name", @@ -565,7 +610,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -573,6 +618,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="feature_importance", @@ -591,6 +637,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -601,8 +648,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1687, - serialized_end=1793, + serialized_start=1763, + serialized_end=1869, ) 
_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY.fields_by_name[ @@ -672,27 +719,26 @@ TablesDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TablesDatasetMetadata", (_message.Message,), - dict( - TargetColumnCorrelationsEntry=_reflection.GeneratedProtocolMessageType( + { + "TargetColumnCorrelationsEntry": _reflection.GeneratedProtocolMessageType( "TargetColumnCorrelationsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2" + { + "DESCRIPTOR": _TABLESDATASETMETADATA_TARGETCOLUMNCORRELATIONSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2" # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesDatasetMetadata.TargetColumnCorrelationsEntry) - ), + }, ), - DESCRIPTOR=_TABLESDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""Metadata for a dataset used for AutoML Tables. - + "DESCRIPTOR": _TABLESDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """Metadata for a dataset used for AutoML Tables. Attributes: primary_table_spec_id: - Output only. The table\_spec\_id of the primary table of this + Output only. The table_spec_id of the primary table of this dataset. target_column_spec_id: - column\_spec\_id of the primary table's column that should be + column_spec_id of the primary table’s column that should be used as the training & prediction target. This column must be non-nullable and have one of following data types (otherwise model creation will error): - CATEGORY - FLOAT64 If the @@ -701,8 +747,8 @@ instantly affect any other users concurrently working with the dataset. weight_column_spec_id: - column\_spec\_id of the primary table's column that should be - used as the weight column, i.e. the higher the value the more + column_spec_id of the primary table’s column that should be + used as the weight column, i.e. the higher the value the more important the row will be during model training. Required type: FLOAT64. Allowed values: 0 to 10000, inclusive on both ends; 0 means the row is ignored for training. If not set all @@ -710,40 +756,40 @@ this field will instantly affect any other users concurrently working with the dataset. ml_use_column_spec_id: - column\_spec\_id of the primary table column which specifies a - possible ML use of the row, i.e. the column will be used to + column_spec_id of the primary table column which specifies a + possible ML use of the row, i.e. the column will be used to split the rows into TRAIN, VALIDATE and TEST sets. Required type: STRING. This column, if set, must either have all of ``TRAIN``, ``VALIDATE``, ``TEST`` among its values, or only have ``TEST``, ``UNASSIGNED`` values. In the latter case the rows with ``UNASSIGNED`` value will be assigned by AutoML. Note that if a given ml use distribution makes it impossible - to create a "good" model, that call will error describing the - issue. If both this column\_spec\_id and primary table's - time\_column\_spec\_id are not set, then all rows are treated - as ``UNASSIGNED``. NOTE: Updates of this field will instantly + to create a “good” model, that call will error describing the + issue. If both this column_spec_id and primary table’s + time_column_spec_id are not set, then all rows are treated as + ``UNASSIGNED``. NOTE: Updates of this field will instantly affect any other users concurrently working with the dataset. target_column_correlations: Output only. 
Correlations between [TablesDatasetMetadata.targ - et\_column\_spec\_id][google.cloud.automl.v1beta1.TablesDatase - tMetadata.target\_column\_spec\_id], and other columns of the - [TablesDatasetMetadataprimary\_table][google.cloud.automl.v1be - ta1.TablesDatasetMetadata.primary\_table\_spec\_id]. Only set - if the target column is set. Mapping from other column spec id - to its CorrelationStats with the target column. This field may - be stale, see the stats\_update\_time field for for the - timestamp at which these stats were last updated. + et_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMe + tadata.target_column_spec_id], and other columns of the [Tabl + esDatasetMetadataprimary_table][google.cloud.automl.v1beta1.Ta + blesDatasetMetadata.primary_table_spec_id]. Only set if the + target column is set. Mapping from other column spec id to its + CorrelationStats with the target column. This field may be + stale, see the stats_update_time field for for the timestamp + at which these stats were last updated. stats_update_time: Output only. The most recent timestamp when - target\_column\_correlations field and all descendant - ColumnSpec.data\_stats and ColumnSpec.top\_correlated\_columns + target_column_correlations field and all descendant + ColumnSpec.data_stats and ColumnSpec.top_correlated_columns fields were last (re-)generated. Any changes that happened to the dataset afterwards are not reflected in these fields values. The regeneration happens in the background on a best effort basis. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TablesDatasetMetadata) _sym_db.RegisterMessage(TablesDatasetMetadata.TargetColumnCorrelationsEntry) @@ -751,11 +797,10 @@ TablesModelMetadata = _reflection.GeneratedProtocolMessageType( "TablesModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TABLESMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""Model metadata specific to AutoML Tables. - + { + "DESCRIPTOR": _TABLESMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """Model metadata specific to AutoML Tables. Attributes: additional_optimization_objective_config: @@ -763,74 +808,73 @@ ``MAXIMIZE_PRECISION_AT_RECALL`` and ``MAXIMIZE_RECALL_AT_PRECISION``, otherwise unused. optimization_objective_recall_value: - Required when optimization\_objective is - "MAXIMIZE\_PRECISION\_AT\_RECALL". Must be between 0 and 1, + Required when optimization_objective is + “MAXIMIZE_PRECISION_AT_RECALL”. Must be between 0 and 1, inclusive. optimization_objective_precision_value: - Required when optimization\_objective is - "MAXIMIZE\_RECALL\_AT\_PRECISION". Must be between 0 and 1, + Required when optimization_objective is + “MAXIMIZE_RECALL_AT_PRECISION”. Must be between 0 and 1, inclusive. target_column_spec: - Column spec of the dataset's primary table's column the model + Column spec of the dataset’s primary table’s column the model is predicting. Snapshotted when model creation started. Only 3 - fields are used: name - May be set on CreateModel, if it's not + fields are used: name - May be set on CreateModel, if it’s not then the ColumnSpec corresponding to the current - target\_column\_spec\_id of the dataset the model is trained - from is used. If neither is set, CreateModel will error. - display\_name - Output only. data\_type - Output only. + target_column_spec_id of the dataset the model is trained from + is used. 
If neither is set, CreateModel will error. + display_name - Output only. data_type - Output only. input_feature_column_specs: - Column specs of the dataset's primary table's columns, on + Column specs of the dataset’s primary table’s columns, on which the model is trained and which are used as the input for - predictions. The [target\_column][google.cloud.automl.v1beta1 - .TablesModelMetadata.target\_column\_spec] as well as, - according to dataset's state upon model creation, [weight\_co - lumn][google.cloud.automl.v1beta1.TablesDatasetMetadata.weight - \_column\_spec\_id], and [ml\_use\_column][google.cloud.autom - l.v1beta1.TablesDatasetMetadata.ml\_use\_column\_spec\_id] - must never be included here. Only 3 fields are used: - name - - May be set on CreateModel, if set only the columns specified - are used, otherwise all primary table's columns (except the - ones listed above) are used for the training and prediction - input. - display\_name - Output only. - data\_type - - Output only. + predictions. The [target_column][google.cloud.automl.v1beta1. + TablesModelMetadata.target_column_spec] as well as, according + to dataset’s state upon model creation, [weight_column][googl + e.cloud.automl.v1beta1.TablesDatasetMetadata.weight_column_spe + c_id], and [ml_use_column][google.cloud.automl.v1beta1.Tables + DatasetMetadata.ml_use_column_spec_id] must never be included + here. Only 3 fields are used: - name - May be set on + CreateModel, if set only the columns specified are used, + otherwise all primary table’s columns (except the ones + listed above) are used for the training and prediction input. + - display_name - Output only. - data_type - Output only. optimization_objective: Objective function the model is optimizing towards. The training process creates a model that maximizes/minimizes the value of the objective function over the validation set. The supported optimization objectives depend on the prediction type. If the field is not set, a default objective function is - used. CLASSIFICATION\_BINARY: "MAXIMIZE\_AU\_ROC" (default) - + used. CLASSIFICATION_BINARY: “MAXIMIZE_AU_ROC” (default) - Maximize the area under the receiver operating characteristic - (ROC) curve. "MINIMIZE\_LOG\_LOSS" - Minimize log loss. - "MAXIMIZE\_AU\_PRC" - Maximize the area under the precision- - recall curve. "MAXIMIZE\_PRECISION\_AT\_RECALL" - Maximize + (ROC) curve. “MINIMIZE_LOG_LOSS” - Minimize log loss. + “MAXIMIZE_AU_PRC” - Maximize the area under the precision- + recall curve. “MAXIMIZE_PRECISION_AT_RECALL” - Maximize precision for a specified recall value. - "MAXIMIZE\_RECALL\_AT\_PRECISION" - Maximize recall for a - specified precision value. CLASSIFICATION\_MULTI\_CLASS : - "MINIMIZE\_LOG\_LOSS" (default) - Minimize log loss. - REGRESSION: "MINIMIZE\_RMSE" (default) - Minimize root-mean- - squared error (RMSE). "MINIMIZE\_MAE" - Minimize mean-absolute - error (MAE). "MINIMIZE\_RMSLE" - Minimize root-mean-squared - log error (RMSLE). + “MAXIMIZE_RECALL_AT_PRECISION” - Maximize recall for a + specified precision value. CLASSIFICATION_MULTI_CLASS : + “MINIMIZE_LOG_LOSS” (default) - Minimize log loss. + REGRESSION: “MINIMIZE_RMSE” (default) - Minimize root-mean- + squared error (RMSE). “MINIMIZE_MAE” - Minimize mean-absolute + error (MAE). “MINIMIZE_RMSLE” - Minimize root-mean-squared log + error (RMSLE). tables_model_column_info: Output only. 
Auxiliary information for each of the - input\_feature\_column\_specs with respect to this particular + input_feature_column_specs with respect to this particular model. train_budget_milli_node_hours: Required. The train budget of creating this model, expressed - in milli node hours i.e. 1,000 value in this field means 1 + in milli node hours i.e. 1,000 value in this field means 1 node hour. The training cost of the model will not exceed this budget. The final cost will be attempted to be close to the budget, though may end up being (even) noticeably smaller - - at the backend's discretion. This especially may happen when + - at the backend’s discretion. This especially may happen when further model training ceases to provide any improvements. If the budget is set to a value known to be insufficient to train - a model for the given dataset, the training won't be attempted + a model for the given dataset, the training won’t be attempted and will error. The train budget must be between 1,000 and 72,000 milli node hours, inclusive. train_cost_milli_node_hours: Output only. The actual training cost of the model, expressed - in milli node hours, i.e. 1,000 value in this field means 1 + in milli node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not exceed the train budget. disable_early_stopping: Use the entire training budget. This disables the early @@ -839,62 +883,70 @@ before the entire training budget has been used. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TablesModelMetadata) TablesAnnotation = _reflection.GeneratedProtocolMessageType( "TablesAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TABLESANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""Contains annotation details specific to Tables. - + { + "DESCRIPTOR": _TABLESANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """Contains annotation details specific to Tables. Attributes: score: Output only. A confidence estimate between 0.0 and 1.0, inclusive. A higher value means greater confidence in the - returned value. For [target\_column\_spec][google.cloud.autom - l.v1beta1.TablesModelMetadata.target\_column\_spec] of FLOAT64 + returned value. For [target_column_spec][google.cloud.automl. + v1beta1.TablesModelMetadata.target_column_spec] of FLOAT64 data type the score is not populated. prediction_interval: - Output only. Only populated when [target\_column\_spec][googl - e.cloud.automl.v1beta1.TablesModelMetadata.target\_column\_spe - c] has FLOAT64 data type. An interval in which the exactly + Output only. Only populated when [target_column_spec][google. + cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] + has FLOAT64 data type. An interval in which the exactly correct target value has 95% chance to be in. value: - The predicted value of the row's [target\_column][google.clou - d.automl.v1beta1.TablesModelMetadata.target\_column\_spec]. - The value depends on the column's DataType: - CATEGORY - the + The predicted value of the row’s [target_column][google.cloud + .automl.v1beta1.TablesModelMetadata.target_column_spec]. The + value depends on the column’s DataType: - CATEGORY - the predicted (with the above confidence ``score``) CATEGORY value. - FLOAT64 - the predicted (with above ``prediction_interval``) FLOAT64 value. tables_model_column_info: - Output only. 
Auxiliary information for each of the model's [i - nput\_feature\_column\_specs][google.cloud.automl.v1beta1.Tabl - esModelMetadata.input\_feature\_column\_specs] with respect to - this particular prediction. If no other fields than [column\_ - spec\_name][google.cloud.automl.v1beta1.TablesModelColumnInfo. - column\_spec\_name] and [column\_display\_name][google.cloud. - automl.v1beta1.TablesModelColumnInfo.column\_display\_name] - would be populated, then this whole field is not. + Output only. Auxiliary information for each of the model’s [i + nput_feature_column_specs][google.cloud.automl.v1beta1.TablesM + odelMetadata.input_feature_column_specs] with respect to this + particular prediction. If no other fields than [column_spec_n + ame][google.cloud.automl.v1beta1.TablesModelColumnInfo.column\_ + spec_name] and [column_display_name][google.cloud.automl.v1be + ta1.TablesModelColumnInfo.column_display_name] would be + populated, then this whole field is not. + baseline_score: + Output only. Stores the prediction score for the baseline + example, which is defined as the example with all values set + to their baseline values. This is used as part of the Sampled + Shapley explanation of the model’s prediction. This field is + populated only when feature importance is requested. For + regression models, this holds the baseline prediction for the + baseline example. For classification models, this holds the + baseline prediction for the baseline example for the argmax + class. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesAnnotation) - ), + }, ) _sym_db.RegisterMessage(TablesAnnotation) TablesModelColumnInfo = _reflection.GeneratedProtocolMessageType( "TablesModelColumnInfo", (_message.Message,), - dict( - DESCRIPTOR=_TABLESMODELCOLUMNINFO, - __module__="google.cloud.automl_v1beta1.proto.tables_pb2", - __doc__="""An information specific to given column and Tables Model, - in context of the Model and the predictions created by it. - + { + "DESCRIPTOR": _TABLESMODELCOLUMNINFO, + "__module__": "google.cloud.automl_v1beta1.proto.tables_pb2", + "__doc__": """An information specific to given column and Tables Model, in context + of the Model and the predictions created by it. Attributes: column_spec_name: @@ -902,7 +954,7 @@ Not populated when this proto is outputted to BigQuery. column_display_name: Output only. The display name of the column (same as the - display\_name of its ColumnSpec). + display_name of its ColumnSpec). feature_importance: Output only. When given as part of a Model (always populated): Measurement of how much model predictions correctness on the @@ -910,10 +962,10 @@ and 1, higher means higher influence. These values are normalized - for all input feature columns of a given model they add to 1. When given back by Predict (populated iff - [feature\_importance + [feature_importance param][google.cloud.automl.v1beta1.PredictRequest.params] is - set) or Batch Predict (populated iff [feature\_importance][goo - gle.cloud.automl.v1beta1.PredictRequest.params] param is set): + set) or Batch Predict (populated iff [feature_importance][goog + le.cloud.automl.v1beta1.PredictRequest.params] param is set): Measurement of how impactful for the prediction returned for the given row the value in this column was. Specifically, the feature importance specifies the marginal contribution that @@ -922,7 +974,7 @@ Shapley method. 
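The attribute descriptions above cover the Tables-specific prediction fields regenerated by this patch, including the new baseline_score and per-column feature_importance computed with the Sampled Shapley method. A rough sketch of reading those fields from a v1beta1 Tables prediction follows; the client call, model path, input row, and the feature_importance request parameter are illustrative assumptions for this note, not part of the change itself:

    # Rough sketch (not part of this patch): model path and input row are placeholders,
    # and the client usage is an assumption based on the v1beta1 surface.
    from google.cloud import automl_v1beta1

    client = automl_v1beta1.PredictionServiceClient()
    model_name = "projects/my-project/locations/us-central1/models/TBL123"   # placeholder
    payload = {"row": {"values": [{"string_value": "example"}]}}             # placeholder row

    # Requesting feature importance is what populates tables_model_column_info
    # and baseline_score, per the docstrings above.
    response = client.predict(model_name, payload, params={"feature_importance": "true"})

    for result in response.payload:
        tables = result.tables
        print("baseline score:", tables.baseline_score)   # field added in this patch
        for column in tables.tables_model_column_info:
            print(column.column_display_name, column.feature_importance)
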
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TablesModelColumnInfo) - ), + }, ) _sym_db.RegisterMessage(TablesModelColumnInfo) diff --git a/google/cloud/automl_v1beta1/proto/temporal.proto b/google/cloud/automl_v1beta1/proto/temporal.proto index 84874d99..76db8887 100644 --- a/google/cloud/automl_v1beta1/proto/temporal.proto +++ b/google/cloud/automl_v1beta1/proto/temporal.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/temporal_pb2.py b/google/cloud/automl_v1beta1/proto/temporal_pb2.py index a8e53db8..309c4644 100644 --- a/google/cloud/automl_v1beta1/proto/temporal_pb2.py +++ b/google/cloud/automl_v1beta1/proto/temporal_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/temporal.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -23,12 +20,9 @@ name="google/cloud/automl_v1beta1/proto/temporal.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n0google/cloud/automl_v1beta1/proto/temporal.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"w\n\x0bTimeSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n0google/cloud/automl_v1beta1/proto/temporal.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1cgoogle/api/annotations.proto"w\n\x0bTimeSegment\x12\x34\n\x11start_time_offset\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x32\n\x0f\x65nd_time_offset\x18\x02 \x01(\x0b\x32\x19.google.protobuf.DurationB\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_protobuf_dot_duration__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -42,6 +36,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ 
_descriptor.FieldDescriptor( name="start_time_offset", @@ -60,6 +55,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_time_offset", @@ -78,6 +74,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -104,12 +101,11 @@ TimeSegment = _reflection.GeneratedProtocolMessageType( "TimeSegment", (_message.Message,), - dict( - DESCRIPTOR=_TIMESEGMENT, - __module__="google.cloud.automl_v1beta1.proto.temporal_pb2", - __doc__="""A time period inside of an example that has a time - dimension (e.g. video). - + { + "DESCRIPTOR": _TIMESEGMENT, + "__module__": "google.cloud.automl_v1beta1.proto.temporal_pb2", + "__doc__": """A time period inside of an example that has a time dimension + (e.g. video). Attributes: start_time_offset: @@ -120,7 +116,7 @@ duration since the example start. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TimeSegment) - ), + }, ) _sym_db.RegisterMessage(TimeSegment) diff --git a/google/cloud/automl_v1beta1/proto/text.proto b/google/cloud/automl_v1beta1/proto/text.proto index ca722e07..f6f33185 100644 --- a/google/cloud/automl_v1beta1/proto/text.proto +++ b/google/cloud/automl_v1beta1/proto/text.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,14 +11,13 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; -import "google/api/annotations.proto"; import "google/cloud/automl/v1beta1/classification.proto"; +import "google/api/annotations.proto"; option go_package = "google.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl"; option java_multiple_files = true; @@ -40,20 +39,27 @@ message TextClassificationModelMetadata { } // Dataset metadata that is specific to text extraction -message TextExtractionDatasetMetadata {} +message TextExtractionDatasetMetadata { + +} // Model metadata that is specific to text extraction. -message TextExtractionModelMetadata {} +message TextExtractionModelMetadata { + +} // Dataset metadata for text sentiment. message TextSentimentDatasetMetadata { - // Required. A sentiment is expressed as an integer ordinal, where higher - // value means a more positive sentiment. The range of sentiments that will be - // used is between 0 and sentiment_max (inclusive on both ends), and all the - // values in the range must be represented in the dataset before a model can - // be created. sentiment_max value must be between 1 and 10 (inclusive). + // Required. A sentiment is expressed as an integer ordinal, where higher value + // means a more positive sentiment. The range of sentiments that will be used + // is between 0 and sentiment_max (inclusive on both ends), and all the values + // in the range must be represented in the dataset before a model can be + // created. + // sentiment_max value must be between 1 and 10 (inclusive). int32 sentiment_max = 1; } // Model metadata that is specific to text sentiment. 
-message TextSentimentModelMetadata {} +message TextSentimentModelMetadata { + +} diff --git a/google/cloud/automl_v1beta1/proto/text_extraction.proto b/google/cloud/automl_v1beta1/proto/text_extraction.proto index 07f0dda8..cfb0e0b3 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction.proto +++ b/google/cloud/automl_v1beta1/proto/text_extraction.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py index 04dc759c..ab21cf1e 100644 --- a/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_extraction_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/text_extraction.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,9 @@ name="google/cloud/automl_v1beta1/proto/text_extraction.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 \x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1P\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n7google/cloud/automl_v1beta1/proto/text_extraction.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x34google/cloud/automl_v1beta1/proto/text_segment.proto\x1a\x1cgoogle/api/annotations.proto"y\n\x18TextExtractionAnnotation\x12@\n\x0ctext_segment\x18\x03 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSegmentH\x00\x12\r\n\x05score\x18\x01 
\x01(\x02\x42\x0c\n\nannotation"\x97\x02\n\x1fTextExtractionEvaluationMetrics\x12\x0e\n\x06\x61u_prc\x18\x01 \x01(\x02\x12w\n\x1a\x63onfidence_metrics_entries\x18\x02 \x03(\x0b\x32S.google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry\x1ak\n\x16\x43onfidenceMetricsEntry\x12\x1c\n\x14\x63onfidence_threshold\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x03 \x01(\x02\x12\x11\n\tprecision\x18\x04 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x05 \x01(\x02\x42\xa5\x01\n\x1f\x63om.google.cloud.automl.v1beta1P\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_text__segment__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -44,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="text_segment", @@ -62,6 +57,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="score", @@ -80,6 +76,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -95,8 +92,9 @@ full_name="google.cloud.automl.v1beta1.TextExtractionAnnotation.annotation", index=0, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], - ) + ), ], serialized_start=172, serialized_end=293, @@ -109,6 +107,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="confidence_threshold", @@ -127,6 +126,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -145,6 +145,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="precision", @@ -163,6 +164,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -181,6 +183,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -201,6 +204,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="au_prc", @@ -219,6 +223,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confidence_metrics_entries", @@ -237,10 +242,11 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], - nested_types=[_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY], + nested_types=[_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY,], enum_types=[], serialized_options=None, is_extendable=False, @@ -277,11 +283,10 @@ TextExtractionAnnotation = _reflection.GeneratedProtocolMessageType( "TextExtractionAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2", - __doc__="""Annotation for identifying spans of text. 
- + { + "DESCRIPTOR": _TEXTEXTRACTIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", + "__doc__": """Annotation for identifying spans of text. Attributes: annotation: @@ -296,22 +301,21 @@ annotation. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextExtractionAnnotation) TextExtractionEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "TextExtractionEvaluationMetrics", (_message.Message,), - dict( - ConfidenceMetricsEntry=_reflection.GeneratedProtocolMessageType( + { + "ConfidenceMetricsEntry": _reflection.GeneratedProtocolMessageType( "ConfidenceMetricsEntry", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, - __module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2", - __doc__="""Metrics for a single confidence threshold. - + { + "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS_CONFIDENCEMETRICSENTRY, + "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", + "__doc__": """Metrics for a single confidence threshold. Attributes: confidence_threshold: @@ -326,12 +330,11 @@ Output only. The harmonic mean of recall and precision. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry) - ), + }, ), - DESCRIPTOR=_TEXTEXTRACTIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.text_extraction_pb2", - __doc__="""Model evaluation metrics for text extraction problems. - + "DESCRIPTOR": _TEXTEXTRACTIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.text_extraction_pb2", + "__doc__": """Model evaluation metrics for text extraction problems. Attributes: au_prc: @@ -341,7 +344,7 @@ Precision-recall curve can be derived from it. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(TextExtractionEvaluationMetrics) _sym_db.RegisterMessage(TextExtractionEvaluationMetrics.ConfidenceMetricsEntry) diff --git a/google/cloud/automl_v1beta1/proto/text_pb2.py b/google/cloud/automl_v1beta1/proto/text_pb2.py index ea8fef3f..6d9b725d 100644 --- a/google/cloud/automl_v1beta1/proto/text_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/text.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,25 +12,22 @@ _sym_db = _symbol_database.Default() -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.cloud.automl_v1beta1.proto import ( classification_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2, ) +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name="google/cloud/automl_v1beta1/proto/text.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\tTextProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n,google/cloud/automl_v1beta1/proto/text.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"q\n!TextClassificationDatasetMetadata\x12L\n\x13\x63lassification_type\x18\x01 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"o\n\x1fTextClassificationModelMetadata\x12L\n\x13\x63lassification_type\x18\x03 \x01(\x0e\x32/.google.cloud.automl.v1beta1.ClassificationType"\x1f\n\x1dTextExtractionDatasetMetadata"\x1d\n\x1bTextExtractionModelMetadata"5\n\x1cTextSentimentDatasetMetadata\x12\x15\n\rsentiment_max\x18\x01 \x01(\x05"\x1c\n\x1aTextSentimentModelMetadataB\xb0\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\tTextProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ - google_dot_api_dot_annotations__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], ) @@ -44,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_type", @@ -62,7 
+57,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -83,6 +79,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="classification_type", @@ -101,7 +98,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -122,6 +120,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -142,6 +141,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -162,6 +162,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentiment_max", @@ -180,7 +181,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -201,6 +203,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -247,29 +250,27 @@ TextClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TextClassificationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTCLASSIFICATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Dataset metadata for classification. - + { + "DESCRIPTOR": _TEXTCLASSIFICATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Dataset metadata for classification. Attributes: classification_type: Required. Type of the classification problem. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TextClassificationDatasetMetadata) TextClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( "TextClassificationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTCLASSIFICATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Model metadata that is specific to text classification. - + { + "DESCRIPTOR": _TEXTCLASSIFICATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Model metadata that is specific to text classification. Attributes: classification_type: @@ -277,73 +278,65 @@ this model. 
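The text classification metadata messages regenerated above carry a single required classification_type. A minimal sketch of supplying it when creating a dataset follows; the client usage, project, region, and display name are assumptions for illustration, not part of this patch:

    # Minimal sketch (assumptions: google-cloud-automl installed, v1beta1 client,
    # placeholder project/region/display name).
    from google.cloud import automl_v1beta1
    from google.cloud.automl_v1beta1 import enums

    client = automl_v1beta1.AutoMlClient()
    parent = client.location_path("my-project", "us-central1")  # placeholders

    dataset = {
        "display_name": "my_text_dataset",
        "text_classification_dataset_metadata": {
            "classification_type": enums.ClassificationType.MULTICLASS
        },
    }
    created = client.create_dataset(parent, dataset)
    print(created.name)
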
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextClassificationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TextClassificationModelMetadata) TextExtractionDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TextExtractionDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Dataset metadata that is specific to text extraction - - """, + { + "DESCRIPTOR": _TEXTEXTRACTIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Dataset metadata that is specific to text extraction""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TextExtractionDatasetMetadata) TextExtractionModelMetadata = _reflection.GeneratedProtocolMessageType( "TextExtractionModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTEXTRACTIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Model metadata that is specific to text extraction. - - """, + { + "DESCRIPTOR": _TEXTEXTRACTIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Model metadata that is specific to text extraction.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextExtractionModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TextExtractionModelMetadata) TextSentimentDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TextSentimentDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Dataset metadata for text sentiment. - + { + "DESCRIPTOR": _TEXTSENTIMENTDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Dataset metadata for text sentiment. Attributes: sentiment_max: Required. A sentiment is expressed as an integer ordinal, where higher value means a more positive sentiment. The range - of sentiments that will be used is between 0 and - sentiment\_max (inclusive on both ends), and all the values in - the range must be represented in the dataset before a model - can be created. sentiment\_max value must be between 1 and 10 - (inclusive). + of sentiments that will be used is between 0 and sentiment_max + (inclusive on both ends), and all the values in the range must + be represented in the dataset before a model can be created. + sentiment_max value must be between 1 and 10 (inclusive). """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TextSentimentDatasetMetadata) TextSentimentModelMetadata = _reflection.GeneratedProtocolMessageType( "TextSentimentModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.text_pb2", - __doc__="""Model metadata that is specific to text sentiment. 
- - """, + { + "DESCRIPTOR": _TEXTSENTIMENTMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.text_pb2", + "__doc__": """Model metadata that is specific to text sentiment.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TextSentimentModelMetadata) diff --git a/google/cloud/automl_v1beta1/proto/text_segment.proto b/google/cloud/automl_v1beta1/proto/text_segment.proto index 41b8be1c..94b17d93 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment.proto +++ b/google/cloud/automl_v1beta1/proto/text_segment.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py index e896211e..ed5ae997 100644 --- a/google/cloud/automl_v1beta1/proto/text_segment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_segment_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/text_segment.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -22,13 +19,10 @@ name="google/cloud/automl_v1beta1/proto/text_segment.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\020TextSegmentProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n4google/cloud/automl_v1beta1/proto/text_segment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"H\n\x0bTextSegment\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x12\n\nend_offset\x18\x02 \x01(\x03\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TextSegmentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR], + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TextSegmentProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n4google/cloud/automl_v1beta1/proto/text_segment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1cgoogle/api/annotations.proto"H\n\x0bTextSegment\x12\x0f\n\x07\x63ontent\x18\x03 \x01(\t\x12\x14\n\x0cstart_offset\x18\x01 \x01(\x03\x12\x12\n\nend_offset\x18\x02 \x01(\x03\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TextSegmentProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', + 
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,], ) @@ -38,6 +32,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="content", @@ -48,7 +43,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -56,6 +51,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="start_offset", @@ -74,6 +70,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="end_offset", @@ -92,6 +89,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -112,12 +110,11 @@ TextSegment = _reflection.GeneratedProtocolMessageType( "TextSegment", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSEGMENT, - __module__="google.cloud.automl_v1beta1.proto.text_segment_pb2", - __doc__="""A contiguous part of a text (string), assuming it has an - UTF-8 NFC encoding. - + { + "DESCRIPTOR": _TEXTSEGMENT, + "__module__": "google.cloud.automl_v1beta1.proto.text_segment_pb2", + "__doc__": """A contiguous part of a text (string), assuming it has an UTF-8 NFC + encoding. Attributes: content: @@ -129,11 +126,11 @@ end_offset: Required. Zero-based character index of the first character past the end of the text segment (counting character from the - beginning of the text). The character at the end\_offset is - NOT included in the text segment. + beginning of the text). The character at the end_offset is NOT + included in the text segment. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSegment) - ), + }, ) _sym_db.RegisterMessage(TextSegment) diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment.proto b/google/cloud/automl_v1beta1/proto/text_sentiment.proto index 978acb0f..5444c52b 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment.proto +++ b/google/cloud/automl_v1beta1/proto/text_sentiment.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py index c55c9979..1332660d 100644 --- a/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py +++ b/google/cloud/automl_v1beta1/proto/text_sentiment_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/text_sentiment.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,9 @@ name="google/cloud/automl_v1beta1/proto/text_sentiment.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\022TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n6google/cloud/automl_v1beta1/proto/text_sentiment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto",\n\x17TextSentimentAnnotation\x12\x11\n\tsentiment\x18\x01 \x01(\x05"\xc5\x02\n\x1eTextSentimentEvaluationMetrics\x12\x11\n\tprecision\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x03 \x01(\x02\x12\x1b\n\x13mean_absolute_error\x18\x04 \x01(\x02\x12\x1a\n\x12mean_squared_error\x18\x05 \x01(\x02\x12\x14\n\x0clinear_kappa\x18\x06 \x01(\x02\x12\x17\n\x0fquadratic_kappa\x18\x07 \x01(\x02\x12\x66\n\x10\x63onfusion_matrix\x18\x08 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1e\n\x12\x61nnotation_spec_id\x18\t \x03(\tB\x02\x18\x01\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x12TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\022TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n6google/cloud/automl_v1beta1/proto/text_sentiment.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto",\n\x17TextSentimentAnnotation\x12\x11\n\tsentiment\x18\x01 \x01(\x05"\xc5\x02\n\x1eTextSentimentEvaluationMetrics\x12\x11\n\tprecision\x18\x01 \x01(\x02\x12\x0e\n\x06recall\x18\x02 \x01(\x02\x12\x10\n\x08\x66\x31_score\x18\x03 \x01(\x02\x12\x1b\n\x13mean_absolute_error\x18\x04 \x01(\x02\x12\x1a\n\x12mean_squared_error\x18\x05 \x01(\x02\x12\x14\n\x0clinear_kappa\x18\x06 \x01(\x02\x12\x17\n\x0fquadratic_kappa\x18\x07 \x01(\x02\x12\x66\n\x10\x63onfusion_matrix\x18\x08 \x01(\x0b\x32L.google.cloud.automl.v1beta1.ClassificationEvaluationMetrics.ConfusionMatrix\x12\x1e\n\x12\x61nnotation_spec_id\x18\t \x03(\tB\x02\x18\x01\x42\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x12TextSentimentProtoZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -44,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="sentiment", @@ -62,7 +57,8 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, - ) + 
create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -83,6 +79,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="precision", @@ -101,6 +98,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="recall", @@ -119,6 +117,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="f1_score", @@ -137,6 +136,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mean_absolute_error", @@ -155,6 +155,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="mean_squared_error", @@ -173,6 +174,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="linear_kappa", @@ -191,6 +193,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="quadratic_kappa", @@ -209,6 +212,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="confusion_matrix", @@ -227,6 +231,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="annotation_spec_id", @@ -243,8 +248,9 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=_b("\030\001"), + serialized_options=b"\030\001", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -273,11 +279,10 @@ TextSentimentAnnotation = _reflection.GeneratedProtocolMessageType( "TextSentimentAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.text_sentiment_pb2", - __doc__="""Contains annotation details specific to text sentiment. - + { + "DESCRIPTOR": _TEXTSENTIMENTANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", + "__doc__": """Contains annotation details specific to text sentiment. Attributes: sentiment: @@ -286,30 +291,29 @@ ata] when populating the dataset from which the model used for the prediction had been trained. The sentiment values are between 0 and - Dataset.text\_sentiment\_dataset\_metadata.sentiment\_max + Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive), with higher value meaning more positive - sentiment. They are completely relative, i.e. 0 means least - positive sentiment and sentiment\_max means the most positive - from the sentiments present in the train data. Therefore e.g. - if train data had only negative sentiment, then - sentiment\_max, would be still negative (although least - negative). The sentiment shouldn't be confused with "score" or - "magnitude" from the previous Natural Language Sentiment + sentiment. They are completely relative, i.e. 0 means least + positive sentiment and sentiment_max means the most positive + from the sentiments present in the train data. Therefore + e.g. 
if train data had only negative sentiment, then + sentiment_max, would be still negative (although least + negative). The sentiment shouldn’t be confused with “score” or + “magnitude” from the previous Natural Language Sentiment Analysis API. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentAnnotation) - ), + }, ) _sym_db.RegisterMessage(TextSentimentAnnotation) TextSentimentEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "TextSentimentEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_TEXTSENTIMENTEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.text_sentiment_pb2", - __doc__="""Model evaluation metrics for text sentiment problems. - + { + "DESCRIPTOR": _TEXTSENTIMENTEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.text_sentiment_pb2", + "__doc__": """Model evaluation metrics for text sentiment problems. Attributes: precision: @@ -343,7 +347,7 @@ Deprecated . """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TextSentimentEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(TextSentimentEvaluationMetrics) diff --git a/google/cloud/automl_v1beta1/proto/translation.proto b/google/cloud/automl_v1beta1/proto/translation.proto index ed02b2af..8585bd41 100644 --- a/google/cloud/automl_v1beta1/proto/translation.proto +++ b/google/cloud/automl_v1beta1/proto/translation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,12 +11,12 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; package google.cloud.automl.v1beta1; +import "google/api/field_behavior.proto"; import "google/cloud/automl/v1beta1/data_items.proto"; import "google/api/annotations.proto"; @@ -30,10 +30,10 @@ option ruby_package = "Google::Cloud::AutoML::V1beta1"; // Dataset metadata that is specific to translation. message TranslationDatasetMetadata { // Required. The BCP-47 language code of the source language. - string source_language_code = 1; + string source_language_code = 1 [(google.api.field_behavior) = REQUIRED]; // Required. The BCP-47 language code of the target language. - string target_language_code = 2; + string target_language_code = 2 [(google.api.field_behavior) = REQUIRED]; } // Evaluation metrics for the dataset. diff --git a/google/cloud/automl_v1beta1/proto/translation_pb2.py b/google/cloud/automl_v1beta1/proto/translation_pb2.py index 15a08176..b5df3e32 100644 --- a/google/cloud/automl_v1beta1/proto/translation_pb2.py +++ b/google/cloud/automl_v1beta1/proto/translation_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/cloud/automl_v1beta1/proto/translation.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -15,6 +12,7 @@ _sym_db = _symbol_database.Default() +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 from google.cloud.automl_v1beta1.proto import ( data_items_pb2 as google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2, ) @@ -25,13 +23,11 @@ name="google/cloud/automl_v1beta1/proto/translation.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\020TranslationProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n3google/cloud/automl_v1beta1/proto/translation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x1cgoogle/api/annotations.proto"X\n\x1aTranslationDatasetMetadata\x12\x1c\n\x14source_language_code\x18\x01 \x01(\t\x12\x1c\n\x14target_language_code\x18\x02 \x01(\t"K\n\x1cTranslationEvaluationMetrics\x12\x12\n\nbleu_score\x18\x01 \x01(\x01\x12\x17\n\x0f\x62\x61se_bleu_score\x18\x02 \x01(\x01"j\n\x18TranslationModelMetadata\x12\x12\n\nbase_model\x18\x01 \x01(\t\x12\x1c\n\x14source_language_code\x18\x02 \x01(\t\x12\x1c\n\x14target_language_code\x18\x03 \x01(\t"]\n\x15TranslationAnnotation\x12\x44\n\x12translated_content\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetB\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TranslationProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\020TranslationProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n3google/cloud/automl_v1beta1/proto/translation.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x32google/cloud/automl_v1beta1/proto/data_items.proto\x1a\x1cgoogle/api/annotations.proto"b\n\x1aTranslationDatasetMetadata\x12!\n\x14source_language_code\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12!\n\x14target_language_code\x18\x02 \x01(\tB\x03\xe0\x41\x02"K\n\x1cTranslationEvaluationMetrics\x12\x12\n\nbleu_score\x18\x01 \x01(\x01\x12\x17\n\x0f\x62\x61se_bleu_score\x18\x02 \x01(\x01"j\n\x18TranslationModelMetadata\x12\x12\n\nbase_model\x18\x01 \x01(\t\x12\x1c\n\x14source_language_code\x18\x02 \x01(\t\x12\x1c\n\x14target_language_code\x18\x03 \x01(\t"]\n\x15TranslationAnnotation\x12\x44\n\x12translated_content\x18\x01 \x01(\x0b\x32(.google.cloud.automl.v1beta1.TextSnippetB\xb7\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\x10TranslationProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_data__items__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, ], @@ -44,6 +40,7 @@ 
filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="source_language_code", @@ -54,14 +51,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_language_code", @@ -72,14 +70,15 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=b"\340A\002", file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -90,8 +89,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=166, - serialized_end=254, + serialized_start=199, + serialized_end=297, ) @@ -101,6 +100,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="bleu_score", @@ -119,6 +119,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="base_bleu_score", @@ -137,6 +138,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -147,8 +149,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=256, - serialized_end=331, + serialized_start=299, + serialized_end=374, ) @@ -158,6 +160,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="base_model", @@ -168,7 +171,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -176,6 +179,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="source_language_code", @@ -186,7 +190,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -194,6 +198,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), _descriptor.FieldDescriptor( name="target_language_code", @@ -204,7 +209,7 @@ cpp_type=9, label=1, has_default_value=False, - default_value=_b("").decode("utf-8"), + default_value=b"".decode("utf-8"), message_type=None, enum_type=None, containing_type=None, @@ -212,6 +217,7 @@ extension_scope=None, serialized_options=None, file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, ), ], extensions=[], @@ -222,8 +228,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=333, - serialized_end=439, + serialized_start=376, + serialized_end=482, ) @@ -233,6 +239,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[ _descriptor.FieldDescriptor( name="translated_content", @@ -251,7 +258,8 @@ extension_scope=None, 
serialized_options=None, file=DESCRIPTOR, - ) + create_key=_descriptor._internal_create_key, + ), ], extensions=[], nested_types=[], @@ -261,8 +269,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=441, - serialized_end=534, + serialized_start=484, + serialized_end=577, ) _TRANSLATIONANNOTATION.fields_by_name[ @@ -283,11 +291,10 @@ TranslationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "TranslationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Dataset metadata that is specific to translation. - + { + "DESCRIPTOR": _TRANSLATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Dataset metadata that is specific to translation. Attributes: source_language_code: @@ -296,18 +303,17 @@ Required. The BCP-47 language code of the target language. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(TranslationDatasetMetadata) TranslationEvaluationMetrics = _reflection.GeneratedProtocolMessageType( "TranslationEvaluationMetrics", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONEVALUATIONMETRICS, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Evaluation metrics for the dataset. - + { + "DESCRIPTOR": _TRANSLATIONEVALUATIONMETRICS, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Evaluation metrics for the dataset. Attributes: bleu_score: @@ -316,18 +322,17 @@ Output only. BLEU score for base model. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationEvaluationMetrics) - ), + }, ) _sym_db.RegisterMessage(TranslationEvaluationMetrics) TranslationModelMetadata = _reflection.GeneratedProtocolMessageType( "TranslationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Model metadata that is specific to translation. - + { + "DESCRIPTOR": _TRANSLATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Model metadata that is specific to translation. Attributes: base_model: @@ -343,28 +348,29 @@ that is used for training. """, # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(TranslationModelMetadata) TranslationAnnotation = _reflection.GeneratedProtocolMessageType( "TranslationAnnotation", (_message.Message,), - dict( - DESCRIPTOR=_TRANSLATIONANNOTATION, - __module__="google.cloud.automl_v1beta1.proto.translation_pb2", - __doc__="""Annotation details specific to translation. - + { + "DESCRIPTOR": _TRANSLATIONANNOTATION, + "__module__": "google.cloud.automl_v1beta1.proto.translation_pb2", + "__doc__": """Annotation details specific to translation. Attributes: translated_content: Output only . The translated content. 
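The translation hunks above mark source_language_code and target_language_code as REQUIRED via google.api.field_behavior and describe them as BCP-47 codes. A small sketch of building that metadata with the regenerated module follows; the language codes are placeholders, not part of this change:

    # Small sketch using the regenerated message; "en"/"es" are placeholder
    # BCP-47 codes, and the module path mirrors the file shown in this diff.
    from google.cloud.automl_v1beta1.proto import translation_pb2

    metadata = translation_pb2.TranslationDatasetMetadata(
        source_language_code="en",   # marked REQUIRED by this patch
        target_language_code="es",   # marked REQUIRED by this patch
    )
    print(metadata)
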
""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.TranslationAnnotation) - ), + }, ) _sym_db.RegisterMessage(TranslationAnnotation) DESCRIPTOR._options = None +_TRANSLATIONDATASETMETADATA.fields_by_name["source_language_code"]._options = None +_TRANSLATIONDATASETMETADATA.fields_by_name["target_language_code"]._options = None # @@protoc_insertion_point(module_scope) diff --git a/google/cloud/automl_v1beta1/proto/video.proto b/google/cloud/automl_v1beta1/proto/video.proto index b7c7325b..268ae2a8 100644 --- a/google/cloud/automl_v1beta1/proto/video.proto +++ b/google/cloud/automl_v1beta1/proto/video.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/automl_v1beta1/proto/video_pb2.py b/google/cloud/automl_v1beta1/proto/video_pb2.py index 1481681a..b870cb4c 100644 --- a/google/cloud/automl_v1beta1/proto/video_pb2.py +++ b/google/cloud/automl_v1beta1/proto/video_pb2.py @@ -2,9 +2,6 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/cloud/automl_v1beta1/proto/video.proto -import sys - -_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -25,12 +22,9 @@ name="google/cloud/automl_v1beta1/proto/video.proto", package="google.cloud.automl.v1beta1", syntax="proto3", - serialized_options=_b( - "\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1" - ), - serialized_pb=_b( - '\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3' - ), + serialized_options=b"\n\037com.google.cloud.automl.v1beta1B\nVideoProtoP\001ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\312\002\033Google\\Cloud\\AutoMl\\V1beta1\352\002\036Google::Cloud::AutoML::V1beta1", + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n-google/cloud/automl_v1beta1/proto/video.proto\x12\x1bgoogle.cloud.automl.v1beta1\x1a\x36google/cloud/automl_v1beta1/proto/classification.proto\x1a\x1cgoogle/api/annotations.proto"$\n"VideoClassificationDatasetMetadata"$\n"VideoObjectTrackingDatasetMetadata""\n VideoClassificationModelMetadata""\n VideoObjectTrackingModelMetadataB\xb1\x01\n\x1f\x63om.google.cloud.automl.v1beta1B\nVideoProtoP\x01ZAgoogle.golang.org/genproto/googleapis/cloud/automl/v1beta1;automl\xca\x02\x1bGoogle\\Cloud\\AutoMl\\V1beta1\xea\x02\x1eGoogle::Cloud::AutoML::V1beta1b\x06proto3', dependencies=[ 
google_dot_cloud_dot_automl__v1beta1_dot_proto_dot_classification__pb2.DESCRIPTOR, google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -44,6 +38,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -64,6 +59,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -84,6 +80,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -104,6 +101,7 @@ filename=None, file=DESCRIPTOR, containing_type=None, + create_key=_descriptor._internal_create_key, fields=[], extensions=[], nested_types=[], @@ -134,57 +132,49 @@ VideoClassificationDatasetMetadata = _reflection.GeneratedProtocolMessageType( "VideoClassificationDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCLASSIFICATIONDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Dataset metadata specific to video classification. All - Video Classification datasets are treated as multi label. - - """, + { + "DESCRIPTOR": _VIDEOCLASSIFICATIONDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Dataset metadata specific to video classification. All Video + Classification datasets are treated as multi label.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoClassificationDatasetMetadata) VideoObjectTrackingDatasetMetadata = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingDatasetMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGDATASETMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Dataset metadata specific to video object tracking. - - """, + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGDATASETMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Dataset metadata specific to video object tracking.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingDatasetMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingDatasetMetadata) VideoClassificationModelMetadata = _reflection.GeneratedProtocolMessageType( "VideoClassificationModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOCLASSIFICATIONMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Model metadata specific to video classification. - - """, + { + "DESCRIPTOR": _VIDEOCLASSIFICATIONMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Model metadata specific to video classification.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoClassificationModelMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoClassificationModelMetadata) VideoObjectTrackingModelMetadata = _reflection.GeneratedProtocolMessageType( "VideoObjectTrackingModelMetadata", (_message.Message,), - dict( - DESCRIPTOR=_VIDEOOBJECTTRACKINGMODELMETADATA, - __module__="google.cloud.automl_v1beta1.proto.video_pb2", - __doc__="""Model metadata specific to video object tracking. 
- - """, + { + "DESCRIPTOR": _VIDEOOBJECTTRACKINGMODELMETADATA, + "__module__": "google.cloud.automl_v1beta1.proto.video_pb2", + "__doc__": """Model metadata specific to video object tracking.""", # @@protoc_insertion_point(class_scope:google.cloud.automl.v1beta1.VideoObjectTrackingModelMetadata) - ), + }, ) _sym_db.RegisterMessage(VideoObjectTrackingModelMetadata) diff --git a/noxfile.py b/noxfile.py index 8e90abd8..512fe04a 100644 --- a/noxfile.py +++ b/noxfile.py @@ -23,14 +23,15 @@ import nox -BLACK_VERSION = "black==19.3b0" +BLACK_VERSION = "black==19.10b0" BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] -if os.path.exists("samples"): - BLACK_PATHS.append("samples") +DEFAULT_PYTHON_VERSION = "3.8" +SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"] +UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.5", "3.6", "3.7", "3.8"] -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): """Run linters. @@ -38,7 +39,9 @@ def lint(session): serious code quality issues. """ session.install("flake8", BLACK_VERSION) - session.run("black", "--check", *BLACK_PATHS) + session.run( + "black", "--check", *BLACK_PATHS, + ) session.run("flake8", "google", "tests") @@ -53,10 +56,12 @@ def blacken(session): check the state of the `gcp_ubuntu_config` we use for that Kokoro run. """ session.install(BLACK_VERSION) - session.run("black", *BLACK_PATHS) + session.run( + "black", *BLACK_PATHS, + ) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def lint_setup_py(session): """Verify that setup.py is valid (including RST check).""" session.install("docutils", "pygments") @@ -72,6 +77,7 @@ def default(session): session.run( "py.test", "--quiet", + "--cov=google.cloud.automl", "--cov=google.cloud", "--cov=tests.unit", "--cov-append", @@ -83,13 +89,13 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=UNIT_TEST_PYTHON_VERSIONS) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") @@ -109,7 +115,9 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install("mock", "pytest", "google-cloud-testutils") + session.install( + "mock", "pytest", "google-cloud-testutils", + ) session.install("-e", ".[pandas,storage]") # Run py.test against the system tests. @@ -119,7 +127,7 @@ def system(session): session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def cover(session): """Run the final coverage report. @@ -132,7 +140,7 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python="3.7") +@nox.session(python=DEFAULT_PYTHON_VERSION) def docs(session): """Build the docs for this library.""" @@ -142,7 +150,6 @@ def docs(session): shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( "sphinx-build", - "-W", # warnings as errors "-T", # show full traceback on exception "-N", # no colors "-b", diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh new file mode 100755 index 00000000..ff599eb2 --- /dev/null +++ b/scripts/decrypt-secrets.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2015 Google Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +ROOT=$( dirname "$DIR" ) + +# Work from the project root. +cd $ROOT + +# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. +PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" + +gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + > testing/test-env.sh +gcloud secrets versions access latest \ + --secret="python-docs-samples-service-account" \ + > testing/service-account.json +gcloud secrets versions access latest \ + --secret="python-docs-samples-client-secrets" \ + > testing/client-secrets.json \ No newline at end of file diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py new file mode 100644 index 00000000..d309d6e9 --- /dev/null +++ b/scripts/readme-gen/readme_gen.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +# Copyright 2016 Google Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generates READMEs using configuration defined in yaml.""" + +import argparse +import io +import os +import subprocess + +import jinja2 +import yaml + + +jinja_env = jinja2.Environment( + trim_blocks=True, + loader=jinja2.FileSystemLoader( + os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')))) + +README_TMPL = jinja_env.get_template('README.tmpl.rst') + + +def get_help(file): + return subprocess.check_output(['python', file, '--help']).decode() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('source') + parser.add_argument('--destination', default='README.rst') + + args = parser.parse_args() + + source = os.path.abspath(args.source) + root = os.path.dirname(source) + destination = os.path.join(root, args.destination) + + jinja_env.globals['get_help'] = get_help + + with io.open(source, 'r') as f: + config = yaml.load(f) + + # This allows get_help to execute in the right directory. + os.chdir(root) + + output = README_TMPL.render(config) + + with io.open(destination, 'w') as f: + f.write(output) + + +if __name__ == '__main__': + main() diff --git a/scripts/readme-gen/templates/README.tmpl.rst b/scripts/readme-gen/templates/README.tmpl.rst new file mode 100644 index 00000000..4fd23976 --- /dev/null +++ b/scripts/readme-gen/templates/README.tmpl.rst @@ -0,0 +1,87 @@ +{# The following line is a lie. BUT! Once jinja2 is done with it, it will + become truth! #} +.. This file is automatically generated. Do not edit this file directly. 
+ +{{product.name}} Python Samples +=============================================================================== + +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/README.rst + + +This directory contains samples for {{product.name}}. {{product.description}} + +{{description}} + +.. _{{product.name}}: {{product.url}} + +{% if required_api_url %} +To run the sample, you need to enable the API at: {{required_api_url}} +{% endif %} + +{% if required_role %} +To run the sample, you need to have `{{required_role}}` role. +{% endif %} + +{{other_required_steps}} + +{% if setup %} +Setup +------------------------------------------------------------------------------- + +{% for section in setup %} + +{% include section + '.tmpl.rst' %} + +{% endfor %} +{% endif %} + +{% if samples %} +Samples +------------------------------------------------------------------------------- + +{% for sample in samples %} +{{sample.name}} ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +{% if not sample.hide_cloudshell_button %} +.. image:: https://gstatic.com/cloudssh/images/open-btn.png + :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/{{sample.file}},{{folder}}/README.rst +{% endif %} + + +{{sample.description}} + +To run this sample: + +.. code-block:: bash + + $ python {{sample.file}} +{% if sample.show_help %} + + {{get_help(sample.file)|indent}} +{% endif %} + + +{% endfor %} +{% endif %} + +{% if cloud_client_library %} + +The client library +------------------------------------------------------------------------------- + +This sample uses the `Google Cloud Client Library for Python`_. +You can read the documentation for more details on API usage and use GitHub +to `browse the source`_ and `report issues`_. + +.. _Google Cloud Client Library for Python: + https://googlecloudplatform.github.io/google-cloud-python/ +.. _browse the source: + https://github.com/GoogleCloudPlatform/google-cloud-python +.. _report issues: + https://github.com/GoogleCloudPlatform/google-cloud-python/issues + +{% endif %} + +.. _Google Cloud SDK: https://cloud.google.com/sdk/ \ No newline at end of file diff --git a/scripts/readme-gen/templates/auth.tmpl.rst b/scripts/readme-gen/templates/auth.tmpl.rst new file mode 100644 index 00000000..1446b94a --- /dev/null +++ b/scripts/readme-gen/templates/auth.tmpl.rst @@ -0,0 +1,9 @@ +Authentication +++++++++++++++ + +This sample requires you to have authentication setup. Refer to the +`Authentication Getting Started Guide`_ for instructions on setting up +credentials for applications. + +.. _Authentication Getting Started Guide: + https://cloud.google.com/docs/authentication/getting-started diff --git a/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/scripts/readme-gen/templates/auth_api_key.tmpl.rst new file mode 100644 index 00000000..11957ce2 --- /dev/null +++ b/scripts/readme-gen/templates/auth_api_key.tmpl.rst @@ -0,0 +1,14 @@ +Authentication +++++++++++++++ + +Authentication for this service is done via an `API Key`_. To obtain an API +Key: + +1. Open the `Cloud Platform Console`_ +2. Make sure that billing is enabled for your project. +3. From the **Credentials** page, create a new **API Key** or use an existing + one for your project. + +.. 
_API Key: + https://developers.google.com/api-client-library/python/guide/aaa_apikeys +.. _Cloud Console: https://console.cloud.google.com/project?_ diff --git a/scripts/readme-gen/templates/install_deps.tmpl.rst b/scripts/readme-gen/templates/install_deps.tmpl.rst new file mode 100644 index 00000000..a0406dba --- /dev/null +++ b/scripts/readme-gen/templates/install_deps.tmpl.rst @@ -0,0 +1,29 @@ +Install Dependencies +++++++++++++++++++++ + +#. Clone python-docs-samples and change directory to the sample directory you want to use. + + .. code-block:: bash + + $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git + +#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions. + + .. _Python Development Environment Setup Guide: + https://cloud.google.com/python/setup + +#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+. + + .. code-block:: bash + + $ virtualenv env + $ source env/bin/activate + +#. Install the dependencies needed to run the samples. + + .. code-block:: bash + + $ pip install -r requirements.txt + +.. _pip: https://pip.pypa.io/ +.. _virtualenv: https://virtualenv.pypa.io/ diff --git a/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/scripts/readme-gen/templates/install_portaudio.tmpl.rst new file mode 100644 index 00000000..5ea33d18 --- /dev/null +++ b/scripts/readme-gen/templates/install_portaudio.tmpl.rst @@ -0,0 +1,35 @@ +Install PortAudio ++++++++++++++++++ + +Install `PortAudio`_. This is required by the `PyAudio`_ library to stream +audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the +platform. + +* For Mac OS X, you can use `Homebrew`_:: + + brew install portaudio + + **Note**: if you encounter an error when running `pip install` that indicates + it can't find `portaudio.h`, try running `pip install` with the following + flags:: + + pip install --global-option='build_ext' \ + --global-option='-I/usr/local/include' \ + --global-option='-L/usr/local/lib' \ + pyaudio + +* For Debian / Ubuntu Linux:: + + apt-get install portaudio19-dev python-all-dev + +* Windows may work without having to install PortAudio explicitly (it will get + installed with PyAudio). + +For more details, see the `PyAudio installation`_ page. + + +.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/ +.. _PortAudio: http://www.portaudio.com/ +.. _PyAudio installation: + https://people.csail.mit.edu/hubert/pyaudio/#downloads +.. _Homebrew: http://brew.sh diff --git a/setup.cfg b/setup.cfg index 3bd55550..c3a2b39f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,3 +1,19 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + # Generated by synthtool. DO NOT EDIT! 
[bdist_wheel] universal = 1 diff --git a/synth.metadata b/synth.metadata index 79b7aa93..54dcde69 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,26 +1,25 @@ { - "updateTime": "2020-01-31T18:17:08.114692Z", "sources": [ { - "generator": { - "name": "artman", - "version": "0.44.4", - "dockerImage": "googleapis/artman@sha256:19e945954fc960a4bdfee6cb34695898ab21a8cf0bac063ee39b91f00a1faec8" + "git": { + "name": ".", + "remote": "https://github.com/googleapis/python-automl.git", + "sha": "d6f05a13483cdeccc2532669686d8f6472867bd7" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "2717b8a1c762b26911b45ecc2e4ee01d98401b28", - "internalRef": "292555664" + "sha": "5b85137bf6fb01dcf8a949a6a04eee6ed0c22bec", + "internalRef": "317760971" } }, { - "template": { - "name": "python_split_library", - "origin": "synthtool.gcp", - "version": "2019.10.17" + "git": { + "name": "synthtool", + "remote": "https://github.com/googleapis/synthtool.git", + "sha": "cf2eff09d0f5319a4dc5cdce2b6356d85af4a798" } } ], @@ -31,8 +30,7 @@ "apiName": "automl", "apiVersion": "v1beta1", "language": "python", - "generator": "gapic", - "config": "google/cloud/automl/artman_automl_v1beta1.yaml" + "generator": "bazel" } }, { @@ -41,8 +39,7 @@ "apiName": "automl", "apiVersion": "v1", "language": "python", - "generator": "gapic", - "config": "google/cloud/automl/artman_automl_v1.yaml" + "generator": "bazel" } } ] diff --git a/synth.py b/synth.py index da66864c..498fdba3 100644 --- a/synth.py +++ b/synth.py @@ -105,14 +105,6 @@ flags=re.DOTALL, ) -# Replace docstring with no summary line -s.replace( - "google/cloud/**/io_pb2.py", - r"""__doc__ = \"\"\"- For Translation: CSV file ``translation\.csv``, with each """, - r'''__doc__ = """ -- For Translation: CSV file ``translation.csv``, with each ''', - flags=re.DOTALL, -) s.replace("google/cloud/**/io_pb2.py", r":raw-latex:`\\t `", r"\\\\t") @@ -131,15 +123,42 @@ # Make \n visible in JSONL samples s.replace("google/cloud/**/io_pb2.py", r"\}\\n", r"}\\\\n") +# properly escape emphasis +s.replace("google/cloud/**/*.py", +"""image_classification_dataset_metadata:\*""", +"""``image_classification_dataset_metadata``""") + +s.replace("google/cloud/**/*.py", +"""video_classification_model_metadata:\*""", +"""``video_classification_model_metadata:*``""") + +# Escape '_' at the end of the line in pb2 docstrings +s.replace( +"google/cloud/**/*_pb2.py", +"""\_$""", +"""\_""", +) # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library( - unit_cov_level=82, cov_level=83, system_test_dependencies=["test_utils"] + unit_cov_level=82, cov_level=83 ) s.move(templated_files) +# TODO(busunkim): Use latest sphinx after microgenerator transition +s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') +# TODO(busunkim): Remove after microgenerator transition. +# This is being added to AutoML because the proto comments are long and +# regex replaces are a brittle temporary solution. 
+s.replace( +"noxfile.py", +""""-W", # warnings as errors +\s+"-T", \# show full traceback on exception""", +""""-T", # show full traceback on exception""") + + # install with extras (pandas, storage) s.replace( "noxfile.py", diff --git a/testing/.gitignore b/testing/.gitignore new file mode 100644 index 00000000..b05fbd63 --- /dev/null +++ b/testing/.gitignore @@ -0,0 +1,3 @@ +test-env.sh +service-account.json +client-secrets.json \ No newline at end of file diff --git a/tests/unit/gapic/v1/test_auto_ml_client_v1.py b/tests/unit/gapic/v1/test_auto_ml_client_v1.py index 7a4558d2..1b8ae7d5 100644 --- a/tests/unit/gapic/v1/test_auto_ml_client_v1.py +++ b/tests/unit/gapic/v1/test_auto_ml_client_v1.py @@ -135,58 +135,6 @@ def test_create_dataset_exception(self): exception = response.exception() assert exception.errors[0] == error - def test_update_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - dataset = {} - update_mask = {} - - response = client.update_dataset(dataset, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateDatasetRequest( - dataset=dataset, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - dataset = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_dataset(dataset, update_mask) - def test_get_dataset(self): # Setup Expected Response name_2 = "name2-1052831874" @@ -278,6 +226,58 @@ def test_list_datasets_exception(self): with pytest.raises(CustomException): list(paged_list_response) + def test_update_dataset(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + dataset = {} + update_mask = {} + + response = client.update_dataset(dataset, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = service_pb2.UpdateDatasetRequest( + dataset=dataset, update_mask=update_mask + ) + 
actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_dataset_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup request + dataset = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_dataset(dataset, update_mask) + def test_delete_dataset(self): # Setup Expected Response expected_response = {} @@ -591,56 +591,6 @@ def test_get_model_exception(self): with pytest.raises(CustomException): client.get_model(name) - def test_update_model(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "dataset_id": dataset_id, - "etag": etag, - } - expected_response = model_pb2.Model(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup Request - model = {} - update_mask = {} - - response = client.update_model(model, update_mask) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateModelRequest( - model=model, update_mask=update_mask - ) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_model_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1.AutoMlClient() - - # Setup request - model = {} - update_mask = {} - - with pytest.raises(CustomException): - client.update_model(model, update_mask) - def test_list_models(self): # Setup Expected Response next_page_token = "" @@ -734,6 +684,56 @@ def test_delete_model_exception(self): exception = response.exception() assert exception.errors[0] == error + def test_update_model(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + dataset_id = "datasetId-2115646910" + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "dataset_id": dataset_id, + "etag": etag, + } + expected_response = model_pb2.Model(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup Request + model = {} + update_mask = {} + + response = client.update_model(model, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = service_pb2.UpdateModelRequest( + model=model, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_model_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as 
create_channel: + create_channel.return_value = channel + client = automl_v1.AutoMlClient() + + # Setup request + model = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_model(model, update_mask) + def test_deploy_model(self): # Setup Expected Response expected_response = {} diff --git a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py index 702a3190..87d1fe03 100644 --- a/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py +++ b/tests/unit/gapic/v1beta1/test_auto_ml_client_v1beta1.py @@ -123,54 +123,6 @@ def test_create_dataset_exception(self): with pytest.raises(CustomException): client.create_dataset(parent, dataset) - def test_update_dataset(self): - # Setup Expected Response - name = "name3373707" - display_name = "displayName1615086568" - description = "description-1724546052" - example_count = 1517063674 - etag = "etag3123477" - expected_response = { - "name": name, - "display_name": display_name, - "description": description, - "example_count": example_count, - "etag": etag, - } - expected_response = dataset_pb2.Dataset(**expected_response) - - # Mock the API response - channel = ChannelStub(responses=[expected_response]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup Request - dataset = {} - - response = client.update_dataset(dataset) - assert expected_response == response - - assert len(channel.requests) == 1 - expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset) - actual_request = channel.requests[0][1] - assert expected_request == actual_request - - def test_update_dataset_exception(self): - # Mock the API response - channel = ChannelStub(responses=[CustomException()]) - patch = mock.patch("google.api_core.grpc_helpers.create_channel") - with patch as create_channel: - create_channel.return_value = channel - client = automl_v1beta1.AutoMlClient() - - # Setup request - dataset = {} - - with pytest.raises(CustomException): - client.update_dataset(dataset) - def test_get_dataset(self): # Setup Expected Response name_2 = "name2-1052831874" @@ -262,6 +214,54 @@ def test_list_datasets_exception(self): with pytest.raises(CustomException): list(paged_list_response) + def test_update_dataset(self): + # Setup Expected Response + name = "name3373707" + display_name = "displayName1615086568" + description = "description-1724546052" + example_count = 1517063674 + etag = "etag3123477" + expected_response = { + "name": name, + "display_name": display_name, + "description": description, + "example_count": example_count, + "etag": etag, + } + expected_response = dataset_pb2.Dataset(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup Request + dataset = {} + + response = client.update_dataset(dataset) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = service_pb2.UpdateDatasetRequest(dataset=dataset) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_dataset_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = automl_v1beta1.AutoMlClient() + + # Setup request + dataset = {} + + with pytest.raises(CustomException): + client.update_dataset(dataset) + def test_delete_dataset(self): # Setup Expected Response expected_response = {} @@ -420,76 +420,71 @@ def test_export_data_exception(self): exception = response.exception() assert exception.errors[0] == error - def test_create_model(self): + def test_get_annotation_spec(self): # Setup Expected Response - name = "name3373707" + name_2 = "name2-1052831874" display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" + example_count = 1517063674 expected_response = { - "name": name, + "name": name_2, "display_name": display_name, - "dataset_id": dataset_id, + "example_count": example_count, } - expected_response = model_pb2.Model(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_create_model", done=True - ) - operation.response.Pack(expected_response) + expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} + name = client.annotation_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]" + ) - response = client.create_model(parent, model) - result = response.result() - assert expected_response == result + response = client.get_annotation_spec(name) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.CreateModelRequest(parent=parent, model=model) + expected_request = service_pb2.GetAnnotationSpecRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_create_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_create_model_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_get_annotation_spec_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") - model = {} + # Setup request + name = client.annotation_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]" + ) - response = client.create_model(parent, model) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.get_annotation_spec(name) - def test_get_model(self): + def test_get_table_spec(self): # Setup Expected Response name_2 = "name2-1052831874" - display_name = "displayName1615086568" - dataset_id = "datasetId-2115646910" + time_column_spec_id = "timeColumnSpecId1558734824" + row_count = 1340416618 + valid_row_count = 406068761 + column_count = 122671386 + etag = "etag3123477" expected_response = { "name": name_2, - "display_name": display_name, - "dataset_id": 
dataset_id, + "time_column_spec_id": time_column_spec_id, + "row_count": row_count, + "valid_row_count": valid_row_count, + "column_count": column_count, + "etag": etag, } - expected_response = model_pb2.Model(**expected_response) + expected_response = table_spec_pb2.TableSpec(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) @@ -499,17 +494,19 @@ def test_get_model(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + name = client.table_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" + ) - response = client.get_model(name) + response = client.get_table_spec(name) assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelRequest(name=name) + expected_request = service_pb2.GetTableSpecRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_get_model_exception(self): + def test_get_table_spec_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -518,18 +515,23 @@ def test_get_model_exception(self): client = automl_v1beta1.AutoMlClient() # Setup request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + name = client.table_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" + ) with pytest.raises(CustomException): - client.get_model(name) + client.get_table_spec(name) - def test_list_models(self): + def test_list_table_specs(self): # Setup Expected Response next_page_token = "" - model_element = {} - model = [model_element] - expected_response = {"next_page_token": next_page_token, "model": model} - expected_response = service_pb2.ListModelsResponse(**expected_response) + table_specs_element = {} + table_specs = [table_specs_element] + expected_response = { + "next_page_token": next_page_token, + "table_specs": table_specs, + } + expected_response = service_pb2.ListTableSpecsResponse(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) @@ -539,20 +541,20 @@ def test_list_models(self): client = automl_v1beta1.AutoMlClient() # Setup Request - parent = client.location_path("[PROJECT]", "[LOCATION]") + parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - paged_list_response = client.list_models(parent) + paged_list_response = client.list_table_specs(parent) resources = list(paged_list_response) assert len(resources) == 1 - assert expected_response.model[0] == resources[0] + assert expected_response.table_specs[0] == resources[0] assert len(channel.requests) == 1 - expected_request = service_pb2.ListModelsRequest(parent=parent) + expected_request = service_pb2.ListTableSpecsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_list_models_exception(self): + def test_list_table_specs_exception(self): channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: @@ -560,175 +562,163 @@ def test_list_models_exception(self): client = automl_v1beta1.AutoMlClient() # Setup request - parent = client.location_path("[PROJECT]", "[LOCATION]") + parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]") - paged_list_response = client.list_models(parent) + paged_list_response = 
client.list_table_specs(parent) with pytest.raises(CustomException): list(paged_list_response) - def test_delete_model(self): + def test_update_table_spec(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_delete_model", done=True - ) - operation.response.Pack(expected_response) + name = "name3373707" + time_column_spec_id = "timeColumnSpecId1558734824" + row_count = 1340416618 + valid_row_count = 406068761 + column_count = 122671386 + etag = "etag3123477" + expected_response = { + "name": name, + "time_column_spec_id": time_column_spec_id, + "row_count": row_count, + "valid_row_count": valid_row_count, + "column_count": column_count, + "etag": etag, + } + expected_response = table_spec_pb2.TableSpec(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + table_spec = {} - response = client.delete_model(name) - result = response.result() - assert expected_response == result + response = client.update_table_spec(table_spec) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.DeleteModelRequest(name=name) + expected_request = service_pb2.UpdateTableSpecRequest(table_spec=table_spec) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_delete_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_delete_model_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_update_table_spec_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + table_spec = {} - response = client.delete_model(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.update_table_spec(table_spec) - def test_deploy_model(self): + def test_get_column_spec(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_deploy_model", done=True - ) - operation.response.Pack(expected_response) + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + etag = "etag3123477" + expected_response = {"name": name_2, "display_name": display_name, "etag": etag} + expected_response = column_spec_pb2.ColumnSpec(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + name = 
client.column_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]" + ) - response = client.deploy_model(name) - result = response.result() - assert expected_response == result + response = client.get_column_spec(name) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.DeployModelRequest(name=name) + expected_request = service_pb2.GetColumnSpecRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_deploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_deploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_get_column_spec_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + name = client.column_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]" + ) - response = client.deploy_model(name) - exception = response.exception() - assert exception.errors[0] == error + with pytest.raises(CustomException): + client.get_column_spec(name) - def test_undeploy_model(self): + def test_list_column_specs(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_undeploy_model", done=True - ) - operation.response.Pack(expected_response) + next_page_token = "" + column_specs_element = {} + column_specs = [column_specs_element] + expected_response = { + "next_page_token": next_page_token, + "column_specs": column_specs, + } + expected_response = service_pb2.ListColumnSpecsResponse(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + parent = client.table_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" + ) - response = client.undeploy_model(name) - result = response.result() - assert expected_response == result + paged_list_response = client.list_column_specs(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.column_specs[0] == resources[0] assert len(channel.requests) == 1 - expected_request = service_pb2.UndeployModelRequest(name=name) + expected_request = service_pb2.ListColumnSpecsRequest(parent=parent) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_undeploy_model_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_undeploy_model_exception", done=True - ) - operation.error.CopyFrom(error) - - # Mock the API response - channel = ChannelStub(responses=[operation]) + def test_list_column_specs_exception(self): + channel = ChannelStub(responses=[CustomException()]) patch = 
mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") + # Setup request + parent = client.table_spec_path( + "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]" + ) - response = client.undeploy_model(name) - exception = response.exception() - assert exception.errors[0] == error + paged_list_response = client.list_column_specs(parent) + with pytest.raises(CustomException): + list(paged_list_response) - def test_get_model_evaluation(self): + def test_update_column_spec(self): # Setup Expected Response - name_2 = "name2-1052831874" - annotation_spec_id = "annotationSpecId60690191" + name = "name3373707" display_name = "displayName1615086568" - evaluated_example_count = 277565350 - expected_response = { - "name": name_2, - "annotation_spec_id": annotation_spec_id, - "display_name": display_name, - "evaluated_example_count": evaluated_example_count, - } - expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response) + etag = "etag3123477" + expected_response = {"name": name, "display_name": display_name, "etag": etag} + expected_response = column_spec_pb2.ColumnSpec(**expected_response) # Mock the API response channel = ChannelStub(responses=[expected_response]) @@ -738,19 +728,17 @@ def test_get_model_evaluation(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) + column_spec = {} - response = client.get_model_evaluation(name) + response = client.update_column_spec(column_spec) assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.GetModelEvaluationRequest(name=name) + expected_request = service_pb2.UpdateColumnSpecRequest(column_spec=column_spec) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_get_model_evaluation_exception(self): + def test_update_column_spec_exception(self): # Mock the API response channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") @@ -759,19 +747,24 @@ def test_get_model_evaluation_exception(self): client = automl_v1beta1.AutoMlClient() # Setup request - name = client.model_evaluation_path( - "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]" - ) + column_spec = {} with pytest.raises(CustomException): - client.get_model_evaluation(name) + client.update_column_spec(column_spec) - def test_export_model(self): + def test_create_model(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) + name = "name3373707" + display_name = "displayName1615086568" + dataset_id = "datasetId-2115646910" + expected_response = { + "name": name, + "display_name": display_name, + "dataset_id": dataset_id, + } + expected_response = model_pb2.Model(**expected_response) operation = operations_pb2.Operation( - name="operations/test_export_model", done=True + name="operations/test_create_model", done=True ) operation.response.Pack(expected_response) @@ -783,25 +776,23 @@ def test_export_model(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + parent = client.location_path("[PROJECT]", "[LOCATION]") + model = {} - response = client.export_model(name, 
output_config) + response = client.create_model(parent, model) result = response.result() assert expected_response == result assert len(channel.requests) == 1 - expected_request = service_pb2.ExportModelRequest( - name=name, output_config=output_config - ) + expected_request = service_pb2.CreateModelRequest(parent=parent, model=model) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_export_model_exception(self): + def test_create_model_exception(self): # Setup Response error = status_pb2.Status() operation = operations_pb2.Operation( - name="operations/test_export_model_exception", done=True + name="operations/test_create_model_exception", done=True ) operation.error.CopyFrom(error) @@ -813,24 +804,27 @@ def test_export_model_exception(self): client = automl_v1beta1.AutoMlClient() # Setup Request - name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} + parent = client.location_path("[PROJECT]", "[LOCATION]") + model = {} - response = client.export_model(name, output_config) + response = client.create_model(parent, model) exception = response.exception() assert exception.errors[0] == error - def test_export_evaluated_examples(self): + def test_get_model(self): # Setup Expected Response - expected_response = {} - expected_response = empty_pb2.Empty(**expected_response) - operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples", done=True - ) - operation.response.Pack(expected_response) + name_2 = "name2-1052831874" + display_name = "displayName1615086568" + dataset_id = "datasetId-2115646910" + expected_response = { + "name": name_2, + "display_name": display_name, + "dataset_id": dataset_id, + } + expected_response = model_pb2.Model(**expected_response) # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[expected_response]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel @@ -838,54 +832,36 @@ def test_export_evaluated_examples(self): # Setup Request name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - response = client.export_evaluated_examples(name, output_config) - result = response.result() - assert expected_response == result + response = client.get_model(name) + assert expected_response == response assert len(channel.requests) == 1 - expected_request = service_pb2.ExportEvaluatedExamplesRequest( - name=name, output_config=output_config - ) + expected_request = service_pb2.GetModelRequest(name=name) actual_request = channel.requests[0][1] assert expected_request == actual_request - def test_export_evaluated_examples_exception(self): - # Setup Response - error = status_pb2.Status() - operation = operations_pb2.Operation( - name="operations/test_export_evaluated_examples_exception", done=True - ) - operation.error.CopyFrom(error) - + def test_get_model_exception(self): # Mock the API response - channel = ChannelStub(responses=[operation]) + channel = ChannelStub(responses=[CustomException()]) patch = mock.patch("google.api_core.grpc_helpers.create_channel") with patch as create_channel: create_channel.return_value = channel client = automl_v1beta1.AutoMlClient() - # Setup Request + # Setup request name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]") - output_config = {} - response = client.export_evaluated_examples(name, output_config) - exception = response.exception() - assert exception.errors[0] == 
error
+        with pytest.raises(CustomException):
+            client.get_model(name)

-    def test_list_model_evaluations(self):
+    def test_list_models(self):
         # Setup Expected Response
         next_page_token = ""
-        model_evaluation_element = {}
-        model_evaluation = [model_evaluation_element]
-        expected_response = {
-            "next_page_token": next_page_token,
-            "model_evaluation": model_evaluation,
-        }
-        expected_response = service_pb2.ListModelEvaluationsResponse(
-            **expected_response
-        )
+        model_element = {}
+        model = [model_element]
+        expected_response = {"next_page_token": next_page_token, "model": model}
+        expected_response = service_pb2.ListModelsResponse(**expected_response)

         # Mock the API response
         channel = ChannelStub(responses=[expected_response])
@@ -895,20 +871,20 @@ def test_list_model_evaluations(self):
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        parent = client.location_path("[PROJECT]", "[LOCATION]")

-        paged_list_response = client.list_model_evaluations(parent)
+        paged_list_response = client.list_models(parent)
         resources = list(paged_list_response)
         assert len(resources) == 1

-        assert expected_response.model_evaluation[0] == resources[0]
+        assert expected_response.model[0] == resources[0]

         assert len(channel.requests) == 1
-        expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent)
+        expected_request = service_pb2.ListModelsRequest(parent=parent)
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_list_model_evaluations_exception(self):
+    def test_list_models_exception(self):
         channel = ChannelStub(responses=[CustomException()])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
@@ -916,264 +892,283 @@ def test_list_model_evaluations_exception(self):
             client = automl_v1beta1.AutoMlClient()

         # Setup request
-        parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        parent = client.location_path("[PROJECT]", "[LOCATION]")

-        paged_list_response = client.list_model_evaluations(parent)
+        paged_list_response = client.list_models(parent)
         with pytest.raises(CustomException):
             list(paged_list_response)

-    def test_get_annotation_spec(self):
+    def test_delete_model(self):
         # Setup Expected Response
-        name_2 = "name2-1052831874"
-        display_name = "displayName1615086568"
-        example_count = 1517063674
-        expected_response = {
-            "name": name_2,
-            "display_name": display_name,
-            "example_count": example_count,
-        }
-        expected_response = annotation_spec_pb2.AnnotationSpec(**expected_response)
+        expected_response = {}
+        expected_response = empty_pb2.Empty(**expected_response)
+        operation = operations_pb2.Operation(
+            name="operations/test_delete_model", done=True
+        )
+        operation.response.Pack(expected_response)

         # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        name = client.annotation_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
-        )
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        response = client.get_annotation_spec(name)
-        assert expected_response == response
+        response = client.delete_model(name)
+        result = response.result()
+        assert expected_response == result

         assert len(channel.requests) == 1
-        expected_request = service_pb2.GetAnnotationSpecRequest(name=name)
+        expected_request = service_pb2.DeleteModelRequest(name=name)
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_get_annotation_spec_exception(self):
+    def test_delete_model_exception(self):
+        # Setup Response
+        error = status_pb2.Status()
+        operation = operations_pb2.Operation(
+            name="operations/test_delete_model_exception", done=True
+        )
+        operation.error.CopyFrom(error)
+
         # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

-        # Setup request
-        name = client.annotation_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[ANNOTATION_SPEC]"
-        )
+        # Setup Request
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        with pytest.raises(CustomException):
-            client.get_annotation_spec(name)
+        response = client.delete_model(name)
+        exception = response.exception()
+        assert exception.errors[0] == error

-    def test_get_table_spec(self):
+    def test_deploy_model(self):
         # Setup Expected Response
-        name_2 = "name2-1052831874"
-        time_column_spec_id = "timeColumnSpecId1558734824"
-        row_count = 1340416618
-        valid_row_count = 406068761
-        column_count = 122671386
-        etag = "etag3123477"
-        expected_response = {
-            "name": name_2,
-            "time_column_spec_id": time_column_spec_id,
-            "row_count": row_count,
-            "valid_row_count": valid_row_count,
-            "column_count": column_count,
-            "etag": etag,
-        }
-        expected_response = table_spec_pb2.TableSpec(**expected_response)
+        expected_response = {}
+        expected_response = empty_pb2.Empty(**expected_response)
+        operation = operations_pb2.Operation(
+            name="operations/test_deploy_model", done=True
+        )
+        operation.response.Pack(expected_response)

         # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        name = client.table_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
-        )
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        response = client.get_table_spec(name)
-        assert expected_response == response
+        response = client.deploy_model(name)
+        result = response.result()
+        assert expected_response == result

         assert len(channel.requests) == 1
-        expected_request = service_pb2.GetTableSpecRequest(name=name)
+        expected_request = service_pb2.DeployModelRequest(name=name)
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_get_table_spec_exception(self):
+    def test_deploy_model_exception(self):
+        # Setup Response
+        error = status_pb2.Status()
+        operation = operations_pb2.Operation(
+            name="operations/test_deploy_model_exception", done=True
+        )
+        operation.error.CopyFrom(error)
+
         # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

-        # Setup request
-        name = client.table_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
-        )
+        # Setup Request
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        with pytest.raises(CustomException):
-            client.get_table_spec(name)
+        response = client.deploy_model(name)
+        exception = response.exception()
+        assert exception.errors[0] == error

-    def test_list_table_specs(self):
+    def test_undeploy_model(self):
         # Setup Expected Response
-        next_page_token = ""
-        table_specs_element = {}
-        table_specs = [table_specs_element]
-        expected_response = {
-            "next_page_token": next_page_token,
-            "table_specs": table_specs,
-        }
-        expected_response = service_pb2.ListTableSpecsResponse(**expected_response)
+        expected_response = {}
+        expected_response = empty_pb2.Empty(**expected_response)
+        operation = operations_pb2.Operation(
+            name="operations/test_undeploy_model", done=True
+        )
+        operation.response.Pack(expected_response)

         # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
-
-        paged_list_response = client.list_table_specs(parent)
-        resources = list(paged_list_response)
-        assert len(resources) == 1
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        assert expected_response.table_specs[0] == resources[0]
+        response = client.undeploy_model(name)
+        result = response.result()
+        assert expected_response == result

         assert len(channel.requests) == 1
-        expected_request = service_pb2.ListTableSpecsRequest(parent=parent)
+        expected_request = service_pb2.UndeployModelRequest(name=name)
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_list_table_specs_exception(self):
-        channel = ChannelStub(responses=[CustomException()])
+    def test_undeploy_model_exception(self):
+        # Setup Response
+        error = status_pb2.Status()
+        operation = operations_pb2.Operation(
+            name="operations/test_undeploy_model_exception", done=True
+        )
+        operation.error.CopyFrom(error)
+
+        # Mock the API response
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

-        # Setup request
-        parent = client.dataset_path("[PROJECT]", "[LOCATION]", "[DATASET]")
+        # Setup Request
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        paged_list_response = client.list_table_specs(parent)
-        with pytest.raises(CustomException):
-            list(paged_list_response)
+        response = client.undeploy_model(name)
+        exception = response.exception()
+        assert exception.errors[0] == error

-    def test_update_table_spec(self):
+    def test_export_model(self):
         # Setup Expected Response
-        name = "name3373707"
-        time_column_spec_id = "timeColumnSpecId1558734824"
-        row_count = 1340416618
-        valid_row_count = 406068761
-        column_count = 122671386
-        etag = "etag3123477"
-        expected_response = {
-            "name": name,
-            "time_column_spec_id": time_column_spec_id,
-            "row_count": row_count,
-            "valid_row_count": valid_row_count,
-            "column_count": column_count,
-            "etag": etag,
-        }
-        expected_response = table_spec_pb2.TableSpec(**expected_response)
+        expected_response = {}
+        expected_response = empty_pb2.Empty(**expected_response)
+        operation = operations_pb2.Operation(
+            name="operations/test_export_model", done=True
+        )
+        operation.response.Pack(expected_response)

         # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        table_spec = {}
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        output_config = {}

-        response = client.update_table_spec(table_spec)
-        assert expected_response == response
+        response = client.export_model(name, output_config)
+        result = response.result()
+        assert expected_response == result

         assert len(channel.requests) == 1
-        expected_request = service_pb2.UpdateTableSpecRequest(table_spec=table_spec)
+        expected_request = service_pb2.ExportModelRequest(
+            name=name, output_config=output_config
+        )
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_update_table_spec_exception(self):
+    def test_export_model_exception(self):
+        # Setup Response
+        error = status_pb2.Status()
+        operation = operations_pb2.Operation(
+            name="operations/test_export_model_exception", done=True
+        )
+        operation.error.CopyFrom(error)
+
         # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

-        # Setup request
-        table_spec = {}
+        # Setup Request
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        output_config = {}

-        with pytest.raises(CustomException):
-            client.update_table_spec(table_spec)
+        response = client.export_model(name, output_config)
+        exception = response.exception()
+        assert exception.errors[0] == error

-    def test_get_column_spec(self):
+    def test_export_evaluated_examples(self):
         # Setup Expected Response
-        name_2 = "name2-1052831874"
-        display_name = "displayName1615086568"
-        etag = "etag3123477"
-        expected_response = {"name": name_2, "display_name": display_name, "etag": etag}
-        expected_response = column_spec_pb2.ColumnSpec(**expected_response)
+        expected_response = {}
+        expected_response = empty_pb2.Empty(**expected_response)
+        operation = operations_pb2.Operation(
+            name="operations/test_export_evaluated_examples", done=True
+        )
+        operation.response.Pack(expected_response)

         # Mock the API response
-        channel = ChannelStub(responses=[expected_response])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        name = client.column_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]"
-        )
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        output_config = {}

-        response = client.get_column_spec(name)
-        assert expected_response == response
+        response = client.export_evaluated_examples(name, output_config)
+        result = response.result()
+        assert expected_response == result

         assert len(channel.requests) == 1
-        expected_request = service_pb2.GetColumnSpecRequest(name=name)
+        expected_request = service_pb2.ExportEvaluatedExamplesRequest(
+            name=name, output_config=output_config
+        )
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_get_column_spec_exception(self):
+    def test_export_evaluated_examples_exception(self):
+        # Setup Response
+        error = status_pb2.Status()
+        operation = operations_pb2.Operation(
+            name="operations/test_export_evaluated_examples_exception", done=True
+        )
+        operation.error.CopyFrom(error)
+
         # Mock the API response
-        channel = ChannelStub(responses=[CustomException()])
+        channel = ChannelStub(responses=[operation])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
             create_channel.return_value = channel
             client = automl_v1beta1.AutoMlClient()

-        # Setup request
-        name = client.column_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]", "[COLUMN_SPEC]"
-        )
+        # Setup Request
+        name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        output_config = {}

-        with pytest.raises(CustomException):
-            client.get_column_spec(name)
+        response = client.export_evaluated_examples(name, output_config)
+        exception = response.exception()
+        assert exception.errors[0] == error

-    def test_list_column_specs(self):
+    def test_get_model_evaluation(self):
         # Setup Expected Response
-        next_page_token = ""
-        column_specs_element = {}
-        column_specs = [column_specs_element]
+        name_2 = "name2-1052831874"
+        annotation_spec_id = "annotationSpecId60690191"
+        display_name = "displayName1615086568"
+        evaluated_example_count = 277565350
         expected_response = {
-            "next_page_token": next_page_token,
-            "column_specs": column_specs,
+            "name": name_2,
+            "annotation_spec_id": annotation_spec_id,
+            "display_name": display_name,
+            "evaluated_example_count": evaluated_example_count,
         }
-        expected_response = service_pb2.ListColumnSpecsResponse(**expected_response)
+        expected_response = model_evaluation_pb2.ModelEvaluation(**expected_response)

         # Mock the API response
         channel = ChannelStub(responses=[expected_response])
@@ -1183,22 +1178,20 @@ def test_list_column_specs(self):
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        parent = client.table_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
+        name = client.model_evaluation_path(
+            "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]"
         )

-        paged_list_response = client.list_column_specs(parent)
-        resources = list(paged_list_response)
-        assert len(resources) == 1
-
-        assert expected_response.column_specs[0] == resources[0]
+        response = client.get_model_evaluation(name)
+        assert expected_response == response

         assert len(channel.requests) == 1
-        expected_request = service_pb2.ListColumnSpecsRequest(parent=parent)
+        expected_request = service_pb2.GetModelEvaluationRequest(name=name)
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_list_column_specs_exception(self):
+    def test_get_model_evaluation_exception(self):
+        # Mock the API response
         channel = ChannelStub(responses=[CustomException()])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
@@ -1206,21 +1199,25 @@ def test_list_column_specs_exception(self):
             client = automl_v1beta1.AutoMlClient()

         # Setup request
-        parent = client.table_spec_path(
-            "[PROJECT]", "[LOCATION]", "[DATASET]", "[TABLE_SPEC]"
+        name = client.model_evaluation_path(
+            "[PROJECT]", "[LOCATION]", "[MODEL]", "[MODEL_EVALUATION]"
         )

-        paged_list_response = client.list_column_specs(parent)
         with pytest.raises(CustomException):
-            list(paged_list_response)
+            client.get_model_evaluation(name)

-    def test_update_column_spec(self):
+    def test_list_model_evaluations(self):
         # Setup Expected Response
-        name = "name3373707"
-        display_name = "displayName1615086568"
-        etag = "etag3123477"
-        expected_response = {"name": name, "display_name": display_name, "etag": etag}
-        expected_response = column_spec_pb2.ColumnSpec(**expected_response)
+        next_page_token = ""
+        model_evaluation_element = {}
+        model_evaluation = [model_evaluation_element]
+        expected_response = {
+            "next_page_token": next_page_token,
+            "model_evaluation": model_evaluation,
+        }
+        expected_response = service_pb2.ListModelEvaluationsResponse(
+            **expected_response
+        )

         # Mock the API response
         channel = ChannelStub(responses=[expected_response])
@@ -1230,18 +1227,20 @@ def test_update_column_spec(self):
             client = automl_v1beta1.AutoMlClient()

         # Setup Request
-        column_spec = {}
+        parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")

-        response = client.update_column_spec(column_spec)
-        assert expected_response == response
+        paged_list_response = client.list_model_evaluations(parent)
+        resources = list(paged_list_response)
+        assert len(resources) == 1
+
+        assert expected_response.model_evaluation[0] == resources[0]

         assert len(channel.requests) == 1
-        expected_request = service_pb2.UpdateColumnSpecRequest(column_spec=column_spec)
+        expected_request = service_pb2.ListModelEvaluationsRequest(parent=parent)
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request

-    def test_update_column_spec_exception(self):
-        # Mock the API response
+    def test_list_model_evaluations_exception(self):
         channel = ChannelStub(responses=[CustomException()])
         patch = mock.patch("google.api_core.grpc_helpers.create_channel")
         with patch as create_channel:
@@ -1249,7 +1248,8 @@ def test_update_column_spec_exception(self):
             client = automl_v1beta1.AutoMlClient()

         # Setup request
-        column_spec = {}
+        parent = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
+        paged_list_response = client.list_model_evaluations(parent)

         with pytest.raises(CustomException):
-            client.update_column_spec(column_spec)
+            list(paged_list_response)
diff --git a/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py b/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py
index 9b510f3a..c83504a4 100644
--- a/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py
+++ b/tests/unit/gapic/v1beta1/test_prediction_service_client_v1beta1.py
@@ -128,14 +128,18 @@ def test_batch_predict(self):
         name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
         input_config = {}
         output_config = {}
+        params = {}

-        response = client.batch_predict(name, input_config, output_config)
+        response = client.batch_predict(name, input_config, output_config, params)
         result = response.result()
         assert expected_response == result

         assert len(channel.requests) == 1
         expected_request = prediction_service_pb2.BatchPredictRequest(
-            name=name, input_config=input_config, output_config=output_config
+            name=name,
+            input_config=input_config,
+            output_config=output_config,
+            params=params,
         )
         actual_request = channel.requests[0][1]
         assert expected_request == actual_request
@@ -159,7 +163,8 @@ def test_batch_predict_exception(self):
         name = client.model_path("[PROJECT]", "[LOCATION]", "[MODEL]")
         input_config = {}
         output_config = {}
+        params = {}

-        response = client.batch_predict(name, input_config, output_config)
+        response = client.batch_predict(name, input_config, output_config, params)
         exception = response.exception()
         assert exception.errors[0] == error