diff --git a/.github/header-checker-lint.yml b/.github/header-checker-lint.yml new file mode 100644 index 00000000..fc281c05 --- /dev/null +++ b/.github/header-checker-lint.yml @@ -0,0 +1,15 @@ +{"allowedCopyrightHolders": ["Google LLC"], + "allowedLicenses": ["Apache-2.0", "MIT", "BSD-3"], + "ignoreFiles": ["**/requirements.txt", "**/requirements-test.txt"], + "sourceFileExtensions": [ + "ts", + "js", + "java", + "sh", + "Dockerfile", + "yaml", + "py", + "html", + "txt" + ] +} \ No newline at end of file diff --git a/.gitignore b/.gitignore index b9daa52f..b4243ced 100644 --- a/.gitignore +++ b/.gitignore @@ -50,8 +50,10 @@ docs.metadata # Virtual environment env/ + +# Test logs coverage.xml -sponge_log.xml +*sponge_log.xml # System test environment variables. system_tests/local_test_setup diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 2ac45dd9..778cbcce 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -15,7 +15,11 @@ set -eo pipefail -cd github/python-automl +if [[ -z "${PROJECT_ROOT:-}" ]]; then + PROJECT_ROOT="github/python-automl" +fi + +cd "${PROJECT_ROOT}" # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -30,16 +34,26 @@ export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json") # Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +python3 -m pip uninstall --yes --quiet nox-automation # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --upgrade --quiet nox +python3 -m nox --version + +# If this is a continuous build, send the test log to the FlakyBot. +# See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]]; then + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + } + trap cleanup EXIT HUP +fi # If NOX_SESSION is set, it only runs the specified session, # otherwise run all the sessions. if [[ -n "${NOX_SESSION:-}" ]]; then - python3.6 -m nox -s "${NOX_SESSION:-}" + python3 -m nox -s ${NOX_SESSION:-} else - python3.6 -m nox + python3 -m nox fi diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg index 11181078..5a7e55cf 100644 --- a/.kokoro/docs/docs-presubmit.cfg +++ b/.kokoro/docs/docs-presubmit.cfg @@ -15,3 +15,14 @@ env_vars: { key: "TRAMPOLINE_IMAGE_UPLOAD" value: "false" } + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-automl/.kokoro/build.sh" +} + +# Only run this nox session. 
+env_vars: { + key: "NOX_SESSION" + value: "docs docfx" +} diff --git a/.kokoro/samples/python3.6/periodic-head.cfg b/.kokoro/samples/python3.6/periodic-head.cfg new file mode 100644 index 00000000..f9cfcd33 --- /dev/null +++ b/.kokoro/samples/python3.6/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.7/periodic-head.cfg b/.kokoro/samples/python3.7/periodic-head.cfg new file mode 100644 index 00000000..f9cfcd33 --- /dev/null +++ b/.kokoro/samples/python3.7/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.8/periodic-head.cfg b/.kokoro/samples/python3.8/periodic-head.cfg new file mode 100644 index 00000000..f9cfcd33 --- /dev/null +++ b/.kokoro/samples/python3.8/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-pubsub/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh new file mode 100755 index 00000000..288451b6 --- /dev/null +++ b/.kokoro/test-samples-against-head.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A customized test runner for samples. +# +# For periodic builds, you can specify this file for testing against head. + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost command to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +cd github/python-automl + +exec .kokoro/test-samples-impl.sh diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh new file mode 100755 index 00000000..cf5de74c --- /dev/null +++ b/.kokoro/test-samples-impl.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ + +# `-e` enables the script to automatically fail when a command fails +# `-o pipefail` sets the exit code to the rightmost command to exit with a non-zero +set -eo pipefail +# Enables `**` to include files nested inside sub-folders +shopt -s globstar + +# Exit early if samples directory doesn't exist +if [ ! -d "./samples" ]; then + echo "No tests run. `./samples` not found" + exit 0 +fi + +# Disable buffering, so that the logs stream through. +export PYTHONUNBUFFERED=1 + +# Debug: show build environment +env | grep KOKORO + +# Install nox +python3.6 -m pip install --upgrade --quiet nox + +# Use secrets accessor service account to get secrets +if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then + gcloud auth activate-service-account \ + --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ + --project="cloud-devrel-kokoro-resources" +fi + +# This script will create 3 files: +# - testing/test-env.sh +# - testing/service-account.json +# - testing/client-secrets.json +./scripts/decrypt-secrets.sh + +source ./testing/test-env.sh +export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json + +# For cloud-run session, we activate the service account for gcloud sdk. +gcloud auth activate-service-account \ + --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" + +export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json + +echo -e "\n******************** TESTING PROJECTS ********************" + +# Switch to 'fail at end' to allow all tests to complete before exiting. +set +e +# Use RTN to return a non-zero value if the test fails. +RTN=0 +ROOT=$(pwd) +# Find all requirements.txt in the samples directory (may break on whitespace). +for file in samples/**/requirements.txt; do + cd "$ROOT" + # Navigate to the project folder. + file=$(dirname "$file") + cd "$file" + + echo "------------------------------------------------------------" + echo "- testing $file" + echo "------------------------------------------------------------" + + # Use nox to execute the tests for the project. + python3.6 -m nox -s "$RUN_TESTS_SESSION" + EXIT=$? + + # If this is a periodic build, send the test log to the FlakyBot. + # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot + $KOKORO_GFILE_DIR/linux_amd64/flakybot + fi + + if [[ $EXIT -ne 0 ]]; then + RTN=1 + echo -e "\n Testing failed: Nox returned a non-zero exit code. \n" + else + echo -e "\n Testing completed.\n" + fi + +done +cd "$ROOT" + +# Workaround for Kokoro permissions issue: delete secrets +rm testing/{test-env.sh,client-secrets.json,service-account.json} + +exit "$RTN" diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index bfda0ed4..dca6fe80 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +# The default test runner for samples. +# +# For periodic builds, we rewind the repo to the latest release, and +# run test-samples-impl.sh. # `-e` enables the script to automatically fail when a command fails # `-o pipefail` sets the exit code to the rightmost command to exit with a non-zero @@ -24,87 +28,19 @@ cd github/python-automl # Run periodic samples tests at latest release if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then + # preserving the test runner implementation.
+ cp .kokoro/test-samples-impl.sh "${TMPDIR}/test-samples-impl.sh" + echo "--- IMPORTANT IMPORTANT IMPORTANT ---" + echo "Now we rewind the repo back to the latest release..." LATEST_RELEASE=$(git describe --abbrev=0 --tags) git checkout $LATEST_RELEASE -fi - -# Exit early if samples directory doesn't exist -if [ ! -d "./samples" ]; then - echo "No tests run. `./samples` not found" - exit 0 -fi - -# Disable buffering, so that the logs stream through. -export PYTHONUNBUFFERED=1 - -# Debug: show build environment -env | grep KOKORO - -# Install nox -python3.6 -m pip install --upgrade --quiet nox - -# Use secrets acessor service account to get secrets -if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then - gcloud auth activate-service-account \ - --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \ - --project="cloud-devrel-kokoro-resources" -fi - -# This script will create 3 files: -# - testing/test-env.sh -# - testing/service-account.json -# - testing/client-secrets.json -./scripts/decrypt-secrets.sh - -source ./testing/test-env.sh -export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json - -# For cloud-run session, we activate the service account for gcloud sdk. -gcloud auth activate-service-account \ - --key-file "${GOOGLE_APPLICATION_CREDENTIALS}" - -export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json - -echo -e "\n******************** TESTING PROJECTS ********************" - -# Switch to 'fail at end' to allow all tests to complete before exiting. -set +e -# Use RTN to return a non-zero value if the test fails. -RTN=0 -ROOT=$(pwd) -# Find all requirements.txt in the samples directory (may break on whitespace). -for file in samples/**/requirements.txt; do - cd "$ROOT" - # Navigate to the project folder. - file=$(dirname "$file") - cd "$file" - - echo "------------------------------------------------------------" - echo "- testing $file" - echo "------------------------------------------------------------" - - # Use nox to execute the tests for the project. - python3.6 -m nox -s "$RUN_TESTS_SESSION" - EXIT=$? - - # If this is a periodic build, send the test log to the FlakyBot. - # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/flakybot. - if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then - chmod +x $KOKORO_GFILE_DIR/linux_amd64/flakybot - $KOKORO_GFILE_DIR/linux_amd64/flakybot + echo "The current head is: " + echo $(git rev-parse --verify HEAD) + echo "--- IMPORTANT IMPORTANT IMPORTANT ---" + # move back the test runner implementation if there's no file. + if [ ! -f .kokoro/test-samples-impl.sh ]; then + cp "${TMPDIR}/test-samples-impl.sh" .kokoro/test-samples-impl.sh fi +fi - if [[ $EXIT -ne 0 ]]; then - RTN=1 - echo -e "\n Testing failed: Nox returned a non-zero exit code. 
\n" - else - echo -e "\n Testing completed.\n" - fi - -done -cd "$ROOT" - -# Workaround for Kokoro permissions issue: delete secrets -rm testing/{test-env.sh,client-secrets.json,service-account.json} - -exit "$RTN" +exec .kokoro/test-samples-impl.sh diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a9024b15..32302e48 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,6 +12,6 @@ repos: hooks: - id: black - repo: https://gitlab.com/pycqa/flake8 - rev: 3.8.4 + rev: 3.9.0 hooks: - id: flake8 diff --git a/.trampolinerc b/.trampolinerc index 995ee291..383b6ec8 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -24,6 +24,7 @@ required_envvars+=( pass_down_envvars+=( "STAGING_BUCKET" "V2_STAGING_BUCKET" + "NOX_SESSION" ) # Prevent unintentional override on the default image. diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index f4fcda7b..1c7a094b 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -70,9 +70,14 @@ We use `nox `__ to instrument our tests. - To test your changes, run unit tests with ``nox``:: $ nox -s unit-2.7 - $ nox -s unit-3.7 + $ nox -s unit-3.8 $ ... +- Args to pytest can be passed through the nox command separated by a `--`. For + example, to run a single test:: + + $ nox -s unit-3.8 -- -k + .. note:: The unit tests and system tests are described in the @@ -93,8 +98,12 @@ On Debian/Ubuntu:: ************ Coding Style ************ +- We use the automatic code formatter ``black``. You can run it using + the nox session ``blacken``. This will eliminate many lint errors. Run via:: + + $ nox -s blacken -- PEP8 compliance, with exceptions defined in the linter configuration. +- PEP8 compliance is required, with exceptions defined in the linter configuration. If you have ``nox`` installed, you can test that you have not introduced any non-compliant code via:: @@ -133,13 +142,18 @@ Running System Tests - To run system tests, you can execute:: - $ nox -s system-3.7 + # Run all system tests + $ nox -s system-3.8 $ nox -s system-2.7 + # Run a single system test + $ nox -s system-3.8 -- -k + + .. note:: System tests are only configured to run under Python 2.7 and - Python 3.7. For expediency, we do not run them in older versions + Python 3.8. For expediency, we do not run them in older versions of Python 3. This alone will not run the tests. You'll need to change some local diff --git a/LICENSE b/LICENSE index a8ee855d..d6456956 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,7 @@ - Apache License + + Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -192,7 +193,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/MANIFEST.in b/MANIFEST.in index e9e29d12..e783f4c6 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -16,10 +16,10 @@ # Generated by synthtool. DO NOT EDIT! 
include README.rst LICENSE -recursive-include google *.json *.proto +recursive-include google *.json *.proto py.typed recursive-include tests * global-exclude *.py[co] global-exclude __pycache__ # Exclude scripts for samples readmegen -prune scripts/readme-gen \ No newline at end of file +prune scripts/readme-gen diff --git a/UPGRADING.md b/UPGRADING.md index d7b3ec05..1275172c 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -17,10 +17,10 @@ The 2.0.0 release requires Python 3.6+. Methods expect request objects. We provide a script that will convert most common use cases. -* Install the library +* Install the library with `libcst`. ```py -python3 -m pip install google-cloud-automl +python3 -m pip install google-cloud-automl[libcst] ``` * The script `fixup_automl_{version}_keywords.py` is shipped with the library. It expects @@ -266,4 +266,4 @@ model_path = f"projects/{project}/locations/{location}/models/{model}" # alternatively you can use `model_path` from AutoMlClient model_path = automl.AutoMlClient.model_path(project_id, location, model_id) -``` \ No newline at end of file +``` diff --git a/docs/_static/custom.css b/docs/_static/custom.css index 0abaf229..bcd37bbd 100644 --- a/docs/_static/custom.css +++ b/docs/_static/custom.css @@ -1,4 +1,9 @@ div#python2-eol { border-color: red; border-width: medium; -} \ No newline at end of file +} + +/* Ensure minimum width for 'Parameters' / 'Returns' column */ +dl.field-list > dt { + min-width: 100px +} diff --git a/docs/automl_v1/auto_ml.rst b/docs/automl_v1/auto_ml.rst new file mode 100644 index 00000000..224ff9cc --- /dev/null +++ b/docs/automl_v1/auto_ml.rst @@ -0,0 +1,11 @@ +AutoMl +------------------------ + +.. automodule:: google.cloud.automl_v1.services.auto_ml + :members: + :inherited-members: + + +.. automodule:: google.cloud.automl_v1.services.auto_ml.pagers + :members: + :inherited-members: diff --git a/docs/automl_v1/prediction_service.rst b/docs/automl_v1/prediction_service.rst new file mode 100644 index 00000000..d8f6da92 --- /dev/null +++ b/docs/automl_v1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.automl_v1.services.prediction_service + :members: + :inherited-members: diff --git a/docs/automl_v1/services.rst b/docs/automl_v1/services.rst index b57ca45e..ce8e2c3d 100644 --- a/docs/automl_v1/services.rst +++ b/docs/automl_v1/services.rst @@ -1,9 +1,7 @@ Services for Google Cloud Automl v1 API ======================================= +.. toctree:: + :maxdepth: 2 -.. automodule:: google.cloud.automl_v1.services.auto_ml - :members: - :inherited-members: -.. automodule:: google.cloud.automl_v1.services.prediction_service - :members: - :inherited-members: + auto_ml + prediction_service diff --git a/docs/automl_v1/types.rst b/docs/automl_v1/types.rst index 14a31a9e..6a8eefe0 100644 --- a/docs/automl_v1/types.rst +++ b/docs/automl_v1/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Automl v1 API .. automodule:: google.cloud.automl_v1.types :members: + :undoc-members: :show-inheritance: diff --git a/docs/automl_v1beta1/auto_ml.rst b/docs/automl_v1beta1/auto_ml.rst new file mode 100644 index 00000000..80a90a4e --- /dev/null +++ b/docs/automl_v1beta1/auto_ml.rst @@ -0,0 +1,11 @@ +AutoMl +------------------------ + +.. automodule:: google.cloud.automl_v1beta1.services.auto_ml + :members: + :inherited-members: + + +.. 
automodule:: google.cloud.automl_v1beta1.services.auto_ml.pagers + :members: + :inherited-members: diff --git a/docs/automl_v1beta1/prediction_service.rst b/docs/automl_v1beta1/prediction_service.rst new file mode 100644 index 00000000..e234e69f --- /dev/null +++ b/docs/automl_v1beta1/prediction_service.rst @@ -0,0 +1,6 @@ +PredictionService +----------------------------------- + +.. automodule:: google.cloud.automl_v1beta1.services.prediction_service + :members: + :inherited-members: diff --git a/docs/automl_v1beta1/services.rst b/docs/automl_v1beta1/services.rst index 511f02ad..ebd9c7c8 100644 --- a/docs/automl_v1beta1/services.rst +++ b/docs/automl_v1beta1/services.rst @@ -1,12 +1,7 @@ Services for Google Cloud Automl v1beta1 API ============================================ +.. toctree:: + :maxdepth: 2 -.. automodule:: google.cloud.automl_v1beta1.services.auto_ml - :members: - :inherited-members: -.. automodule:: google.cloud.automl_v1beta1.services.prediction_service - :members: - :inherited-members: -.. automodule:: google.cloud.automl_v1beta1.services.tables - :members: - :inherited-members: + auto_ml + prediction_service diff --git a/docs/automl_v1beta1/types.rst b/docs/automl_v1beta1/types.rst index b50b55f6..ab8d456a 100644 --- a/docs/automl_v1beta1/types.rst +++ b/docs/automl_v1beta1/types.rst @@ -3,4 +3,5 @@ Types for Google Cloud Automl v1beta1 API .. automodule:: google.cloud.automl_v1beta1.types :members: + :undoc-members: :show-inheritance: diff --git a/google/cloud/automl_v1/__init__.py b/google/cloud/automl_v1/__init__.py index 6f22bb65..b5f76f81 100644 --- a/google/cloud/automl_v1/__init__.py +++ b/google/cloud/automl_v1/__init__.py @@ -104,7 +104,6 @@ __all__ = ( "AnnotationPayload", "AnnotationSpec", - "AutoMlClient", "BatchPredictInputConfig", "BatchPredictOperationMetadata", "BatchPredictOutputConfig", @@ -165,6 +164,7 @@ "OutputConfig", "PredictRequest", "PredictResponse", + "PredictionServiceClient", "TextClassificationDatasetMetadata", "TextClassificationModelMetadata", "TextExtractionAnnotation", @@ -185,5 +185,5 @@ "UndeployModelRequest", "UpdateDatasetRequest", "UpdateModelRequest", - "PredictionServiceClient", + "AutoMlClient", ) diff --git a/google/cloud/automl_v1/services/auto_ml/async_client.py b/google/cloud/automl_v1/services/auto_ml/async_client.py index 254f5cc5..85829e6b 100644 --- a/google/cloud/automl_v1/services/auto_ml/async_client.py +++ b/google/cloud/automl_v1/services/auto_ml/async_client.py @@ -107,7 +107,36 @@ class AutoMlAsyncClient: common_location_path = staticmethod(AutoMlClient.common_location_path) parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path) - from_service_account_file = AutoMlClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. 
+ args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -184,16 +213,17 @@ async def create_dataset( r"""Creates a dataset. Args: - request (:class:`~.service.CreateDatasetRequest`): + request (:class:`google.cloud.automl_v1.types.CreateDatasetRequest`): The request object. Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. parent (:class:`str`): Required. The resource name of the project to create the dataset for. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - dataset (:class:`~.gca_dataset.Dataset`): + dataset (:class:`google.cloud.automl_v1.types.Dataset`): Required. The dataset to create. This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this @@ -206,14 +236,11 @@ async def create_dataset( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.gca_dataset.Dataset``: A workspace for - solving a single, particular machine learning (ML) - problem. A workspace contains examples that may be - annotated. + The result type for the operation will be :class:`google.cloud.automl_v1.types.Dataset` A workspace for solving a single, particular machine learning (ML) problem. + A workspace contains examples that may be annotated. """ # Create or coerce a protobuf request object. @@ -276,12 +303,13 @@ async def get_dataset( r"""Gets a dataset. Args: - request (:class:`~.service.GetDatasetRequest`): + request (:class:`google.cloud.automl_v1.types.GetDatasetRequest`): The request object. Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. name (:class:`str`): Required. The resource name of the dataset to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -293,7 +321,7 @@ async def get_dataset( sent along with the request as metadata. Returns: - ~.dataset.Dataset: + google.cloud.automl_v1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -329,6 +357,7 @@ async def get_dataset( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -358,12 +387,13 @@ async def list_datasets( r"""Lists datasets in a project. Args: - request (:class:`~.service.ListDatasetsRequest`): + request (:class:`google.cloud.automl_v1.types.ListDatasetsRequest`): The request object. Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. parent (:class:`str`): Required. The resource name of the project from which to list datasets. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -375,7 +405,7 @@ async def list_datasets( sent along with the request as metadata. 
Returns: - ~.pagers.ListDatasetsAsyncPager: + google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsAsyncPager: Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. @@ -412,6 +442,7 @@ async def list_datasets( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -448,18 +479,20 @@ async def update_dataset( r"""Updates a dataset. Args: - request (:class:`~.service.UpdateDatasetRequest`): + request (:class:`google.cloud.automl_v1.types.UpdateDatasetRequest`): The request object. Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - dataset (:class:`~.gca_dataset.Dataset`): + dataset (:class:`google.cloud.automl_v1.types.Dataset`): Required. The dataset which replaces the resource on the server. + This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -471,7 +504,7 @@ async def update_dataset( sent along with the request as metadata. Returns: - ~.gca_dataset.Dataset: + google.cloud.automl_v1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -536,12 +569,13 @@ async def delete_dataset( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteDatasetRequest`): + request (:class:`google.cloud.automl_v1.types.DeleteDatasetRequest`): The request object. Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. name (:class:`str`): Required. The resource name of the dataset to delete. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -553,24 +587,22 @@ async def delete_dataset( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. 
@@ -602,6 +634,7 @@ async def delete_dataset( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -649,20 +682,22 @@ async def import_data( field when it completes. Args: - request (:class:`~.service.ImportDataRequest`): + request (:class:`google.cloud.automl_v1.types.ImportDataRequest`): The request object. Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. name (:class:`str`): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.InputConfig`): + input_config (:class:`google.cloud.automl_v1.types.InputConfig`): Required. The desired input location and its domain specific semantics, if any. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -674,24 +709,22 @@ async def import_data( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -758,18 +791,20 @@ async def export_data( completes. Args: - request (:class:`~.service.ExportDataRequest`): + request (:class:`google.cloud.automl_v1.types.ExportDataRequest`): The request object. Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. name (:class:`str`): Required. The resource name of the dataset. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.OutputConfig`): + output_config (:class:`google.cloud.automl_v1.types.OutputConfig`): Required. The desired output location. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -781,24 +816,22 @@ async def export_data( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. 
For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -861,12 +894,13 @@ async def get_annotation_spec( r"""Gets an annotation spec. Args: - request (:class:`~.service.GetAnnotationSpecRequest`): + request (:class:`google.cloud.automl_v1.types.GetAnnotationSpecRequest`): The request object. Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. name (:class:`str`): Required. The resource name of the annotation spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -878,7 +912,7 @@ async def get_annotation_spec( sent along with the request as metadata. Returns: - ~.annotation_spec.AnnotationSpec: + google.cloud.automl_v1.types.AnnotationSpec: A definition of an annotation spec. """ # Create or coerce a protobuf request object. @@ -910,6 +944,7 @@ async def get_annotation_spec( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -944,17 +979,18 @@ async def create_model( each annotation spec. Args: - request (:class:`~.service.CreateModelRequest`): + request (:class:`google.cloud.automl_v1.types.CreateModelRequest`): The request object. Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. parent (:class:`str`): Required. Resource name of the parent project where the model is being created. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - model (:class:`~.gca_model.Model`): + model (:class:`google.cloud.automl_v1.types.Model`): Required. The model to create. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -967,12 +1003,12 @@ async def create_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_model.Model``: API proto representing a - trained machine learning model. + :class:`google.cloud.automl_v1.types.Model` API proto + representing a trained machine learning model. """ # Create or coerce a protobuf request object. @@ -1035,7 +1071,7 @@ async def get_model( r"""Gets a model. Args: - request (:class:`~.service.GetModelRequest`): + request (:class:`google.cloud.automl_v1.types.GetModelRequest`): The request object. Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. name (:class:`str`): @@ -1051,7 +1087,7 @@ async def get_model( sent along with the request as metadata. Returns: - ~.model.Model: + google.cloud.automl_v1.types.Model: API proto representing a trained machine learning model. 
@@ -1085,6 +1121,7 @@ async def get_model( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1114,12 +1151,13 @@ async def list_models( r"""Lists models. Args: - request (:class:`~.service.ListModelsRequest`): + request (:class:`google.cloud.automl_v1.types.ListModelsRequest`): The request object. Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. parent (:class:`str`): Required. Resource name of the project, from which to list the models. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1131,7 +1169,7 @@ async def list_models( sent along with the request as metadata. Returns: - ~.pagers.ListModelsAsyncPager: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelsAsyncPager: Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. @@ -1168,6 +1206,7 @@ async def list_models( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1206,12 +1245,13 @@ async def delete_model( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteModelRequest`): + request (:class:`google.cloud.automl_v1.types.DeleteModelRequest`): The request object. Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. name (:class:`str`): Required. Resource name of the model being deleted. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1223,24 +1263,22 @@ async def delete_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1272,6 +1310,7 @@ async def delete_model( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1310,18 +1349,20 @@ async def update_model( r"""Updates a model. Args: - request (:class:`~.service.UpdateModelRequest`): + request (:class:`google.cloud.automl_v1.types.UpdateModelRequest`): The request object. Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - model (:class:`~.gca_model.Model`): + model (:class:`google.cloud.automl_v1.types.Model`): Required. 
The model which replaces the resource on the server. + This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The update mask applies to the resource. + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1333,7 +1374,7 @@ async def update_model( sent along with the request as metadata. Returns: - ~.gca_model.Model: + google.cloud.automl_v1.types.Model: API proto representing a trained machine learning model. @@ -1406,12 +1447,13 @@ async def deploy_model( completes. Args: - request (:class:`~.service.DeployModelRequest`): + request (:class:`google.cloud.automl_v1.types.DeployModelRequest`): The request object. Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. name (:class:`str`): Required. Resource name of the model to deploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1423,24 +1465,22 @@ async def deploy_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1509,12 +1549,13 @@ async def undeploy_model( completes. Args: - request (:class:`~.service.UndeployModelRequest`): + request (:class:`google.cloud.automl_v1.types.UndeployModelRequest`): The request object. Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. name (:class:`str`): Required. Resource name of the model to undeploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1526,24 +1567,22 @@ async def undeploy_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. 
A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1612,7 +1651,7 @@ async def export_model( completes. Args: - request (:class:`~.service.ExportModelRequest`): + request (:class:`google.cloud.automl_v1.types.ExportModelRequest`): The request object. Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an @@ -1620,12 +1659,14 @@ async def export_model( name (:class:`str`): Required. The resource name of the model to export. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.ModelExportOutputConfig`): + output_config (:class:`google.cloud.automl_v1.types.ModelExportOutputConfig`): Required. The desired output location and configuration. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1637,24 +1678,22 @@ async def export_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1717,12 +1756,13 @@ async def get_model_evaluation( r"""Gets a model evaluation. Args: - request (:class:`~.service.GetModelEvaluationRequest`): + request (:class:`google.cloud.automl_v1.types.GetModelEvaluationRequest`): The request object. Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. name (:class:`str`): Required. Resource name for the model evaluation. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1734,7 +1774,7 @@ async def get_model_evaluation( sent along with the request as metadata. Returns: - ~.model_evaluation.ModelEvaluation: + google.cloud.automl_v1.types.ModelEvaluation: Evaluation results of a model. """ # Create or coerce a protobuf request object. 
@@ -1766,6 +1806,7 @@ async def get_model_evaluation( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1796,7 +1837,7 @@ async def list_model_evaluations( r"""Lists model evaluations. Args: - request (:class:`~.service.ListModelEvaluationsRequest`): + request (:class:`google.cloud.automl_v1.types.ListModelEvaluationsRequest`): The request object. Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. parent (:class:`str`): @@ -1805,6 +1846,7 @@ async def list_model_evaluations( modelId is set as "-", this will list model evaluations from across all models of the parent location. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1822,6 +1864,7 @@ async def list_model_evaluations( 4. - ``NOT annotation_spec_id:*`` --> The model evaluation was done for aggregate of all annotation specs. + This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1833,7 +1876,7 @@ async def list_model_evaluations( sent along with the request as metadata. Returns: - ~.pagers.ListModelEvaluationsAsyncPager: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. @@ -1872,6 +1915,7 @@ async def list_model_evaluations( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, diff --git a/google/cloud/automl_v1/services/auto_ml/client.py b/google/cloud/automl_v1/services/auto_ml/client.py index 0b860787..faebcf8f 100644 --- a/google/cloud/automl_v1/services/auto_ml/client.py +++ b/google/cloud/automl_v1/services/auto_ml/client.py @@ -143,6 +143,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -155,7 +171,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + AutoMlClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -321,10 +337,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.AutoMlTransport]): The + transport (Union[str, AutoMlTransport]): The transport to use. If set to None, a transport is chosen automatically. 
- client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -360,21 +376,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -417,7 +429,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -435,16 +447,17 @@ def create_dataset( r"""Creates a dataset. Args: - request (:class:`~.service.CreateDatasetRequest`): + request (google.cloud.automl_v1.types.CreateDatasetRequest): The request object. Request message for [AutoMl.CreateDataset][google.cloud.automl.v1.AutoMl.CreateDataset]. - parent (:class:`str`): + parent (str): Required. The resource name of the project to create the dataset for. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - dataset (:class:`~.gca_dataset.Dataset`): + dataset (google.cloud.automl_v1.types.Dataset): Required. The dataset to create. This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this @@ -457,14 +470,11 @@ def create_dataset( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.gca_dataset.Dataset``: A workspace for - solving a single, particular machine learning (ML) - problem. A workspace contains examples that may be - annotated. + The result type for the operation will be :class:`google.cloud.automl_v1.types.Dataset` A workspace for solving a single, particular machine learning (ML) problem. + A workspace contains examples that may be annotated. """ # Create or coerce a protobuf request object. @@ -528,12 +538,13 @@ def get_dataset( r"""Gets a dataset. Args: - request (:class:`~.service.GetDatasetRequest`): + request (google.cloud.automl_v1.types.GetDatasetRequest): The request object. Request message for [AutoMl.GetDataset][google.cloud.automl.v1.AutoMl.GetDataset]. - name (:class:`str`): + name (str): Required. The resource name of the dataset to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -545,7 +556,7 @@ def get_dataset( sent along with the request as metadata. 
Returns: - ~.dataset.Dataset: + google.cloud.automl_v1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -603,12 +614,13 @@ def list_datasets( r"""Lists datasets in a project. Args: - request (:class:`~.service.ListDatasetsRequest`): + request (google.cloud.automl_v1.types.ListDatasetsRequest): The request object. Request message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. - parent (:class:`str`): + parent (str): Required. The resource name of the project from which to list datasets. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -620,7 +632,7 @@ def list_datasets( sent along with the request as metadata. Returns: - ~.pagers.ListDatasetsPager: + google.cloud.automl_v1.services.auto_ml.pagers.ListDatasetsPager: Response message for [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. @@ -686,18 +698,20 @@ def update_dataset( r"""Updates a dataset. Args: - request (:class:`~.service.UpdateDatasetRequest`): + request (google.cloud.automl_v1.types.UpdateDatasetRequest): The request object. Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] - dataset (:class:`~.gca_dataset.Dataset`): + dataset (google.cloud.automl_v1.types.Dataset): Required. The dataset which replaces the resource on the server. + This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -709,7 +723,7 @@ def update_dataset( sent along with the request as metadata. Returns: - ~.gca_dataset.Dataset: + google.cloud.automl_v1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -775,12 +789,13 @@ def delete_dataset( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteDatasetRequest`): + request (google.cloud.automl_v1.types.DeleteDatasetRequest): The request object. Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1.AutoMl.DeleteDataset]. - name (:class:`str`): + name (str): Required. The resource name of the dataset to delete. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -792,24 +807,22 @@ def delete_dataset( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -881,20 +894,22 @@ def import_data( field when it completes. Args: - request (:class:`~.service.ImportDataRequest`): + request (google.cloud.automl_v1.types.ImportDataRequest): The request object. Request message for [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData]. - name (:class:`str`): + name (str): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.InputConfig`): + input_config (google.cloud.automl_v1.types.InputConfig): Required. The desired input location and its domain specific semantics, if any. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -906,24 +921,22 @@ def import_data( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -991,18 +1004,20 @@ def export_data( completes. Args: - request (:class:`~.service.ExportDataRequest`): + request (google.cloud.automl_v1.types.ExportDataRequest): The request object. Request message for [AutoMl.ExportData][google.cloud.automl.v1.AutoMl.ExportData]. - name (:class:`str`): + name (str): Required. The resource name of the dataset. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.OutputConfig`): + output_config (google.cloud.automl_v1.types.OutputConfig): Required. The desired output location. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1014,24 +1029,22 @@ def export_data( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. 
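Note: import_data (like delete_dataset above) is an Empty-result long-running operation, so result() is only a completion signal. A sketch with a placeholder dataset name and GCS URI:

from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
input_config = automl_v1.InputConfig(
    gcs_source=automl_v1.GcsSource(input_uris=["gs://example-bucket/data.csv"])
)
operation = client.import_data(
    name="projects/PROJECT_ID/locations/us-central1/datasets/DATASET_ID",
    input_config=input_config,
)
operation.result(timeout=3600)  # returns Empty; raises on failure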
A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1095,12 +1108,13 @@ def get_annotation_spec( r"""Gets an annotation spec. Args: - request (:class:`~.service.GetAnnotationSpecRequest`): + request (google.cloud.automl_v1.types.GetAnnotationSpecRequest): The request object. Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1.AutoMl.GetAnnotationSpec]. - name (:class:`str`): + name (str): Required. The resource name of the annotation spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1112,7 +1126,7 @@ def get_annotation_spec( sent along with the request as metadata. Returns: - ~.annotation_spec.AnnotationSpec: + google.cloud.automl_v1.types.AnnotationSpec: A definition of an annotation spec. """ # Create or coerce a protobuf request object. @@ -1171,17 +1185,18 @@ def create_model( each annotation spec. Args: - request (:class:`~.service.CreateModelRequest`): + request (google.cloud.automl_v1.types.CreateModelRequest): The request object. Request message for [AutoMl.CreateModel][google.cloud.automl.v1.AutoMl.CreateModel]. - parent (:class:`str`): + parent (str): Required. Resource name of the parent project where the model is being created. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - model (:class:`~.gca_model.Model`): + model (google.cloud.automl_v1.types.Model): Required. The model to create. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -1194,12 +1209,12 @@ def create_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_model.Model``: API proto representing a - trained machine learning model. + :class:`google.cloud.automl_v1.types.Model` API proto + representing a trained machine learning model. """ # Create or coerce a protobuf request object. @@ -1263,10 +1278,10 @@ def get_model( r"""Gets a model. Args: - request (:class:`~.service.GetModelRequest`): + request (google.cloud.automl_v1.types.GetModelRequest): The request object. Request message for [AutoMl.GetModel][google.cloud.automl.v1.AutoMl.GetModel]. - name (:class:`str`): + name (str): Required. Resource name of the model. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1279,7 +1294,7 @@ def get_model( sent along with the request as metadata. Returns: - ~.model.Model: + google.cloud.automl_v1.types.Model: API proto representing a trained machine learning model. @@ -1335,12 +1350,13 @@ def list_models( r"""Lists models. 
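Note: create_model follows the same Operation pattern but resolves to google.cloud.automl_v1.types.Model. A sketch with placeholder resource names and an assumed translation model spec:

from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
model = automl_v1.Model(
    display_name="example_model",
    dataset_id="DATASET_ID",
    translation_model_metadata=automl_v1.TranslationModelMetadata(),
)
operation = client.create_model(
    parent="projects/PROJECT_ID/locations/us-central1", model=model
)
created_model = operation.result(timeout=7200)  # automl_v1.Model
print(created_model.name)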
Args: - request (:class:`~.service.ListModelsRequest`): + request (google.cloud.automl_v1.types.ListModelsRequest): The request object. Request message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. - parent (:class:`str`): + parent (str): Required. Resource name of the project, from which to list the models. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1352,7 +1368,7 @@ def list_models( sent along with the request as metadata. Returns: - ~.pagers.ListModelsPager: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelsPager: Response message for [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. @@ -1420,12 +1436,13 @@ def delete_model( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteModelRequest`): + request (google.cloud.automl_v1.types.DeleteModelRequest): The request object. Request message for [AutoMl.DeleteModel][google.cloud.automl.v1.AutoMl.DeleteModel]. - name (:class:`str`): + name (str): Required. Resource name of the model being deleted. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1437,24 +1454,22 @@ def delete_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1517,18 +1532,20 @@ def update_model( r"""Updates a model. Args: - request (:class:`~.service.UpdateModelRequest`): + request (google.cloud.automl_v1.types.UpdateModelRequest): The request object. Request message for [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] - model (:class:`~.gca_model.Model`): + model (google.cloud.automl_v1.types.Model): Required. The model which replaces the resource on the server. + This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - update_mask (:class:`~.field_mask.FieldMask`): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. + This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1540,7 +1557,7 @@ def update_model( sent along with the request as metadata. Returns: - ~.gca_model.Model: + google.cloud.automl_v1.types.Model: API proto representing a trained machine learning model. @@ -1614,12 +1631,13 @@ def deploy_model( completes. 
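Note: list_models now documents its return type as the fully qualified ListModelsPager; the pager can be iterated directly and fetches further pages lazily. A sketch with a placeholder parent:

from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
pager = client.list_models(parent="projects/PROJECT_ID/locations/us-central1")
for model in pager:  # each item is a google.cloud.automl_v1.types.Model
    print(model.name, model.display_name)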
Args: - request (:class:`~.service.DeployModelRequest`): + request (google.cloud.automl_v1.types.DeployModelRequest): The request object. Request message for [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. - name (:class:`str`): + name (str): Required. Resource name of the model to deploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1631,24 +1649,22 @@ def deploy_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1718,12 +1734,13 @@ def undeploy_model( completes. Args: - request (:class:`~.service.UndeployModelRequest`): + request (google.cloud.automl_v1.types.UndeployModelRequest): The request object. Request message for [AutoMl.UndeployModel][google.cloud.automl.v1.AutoMl.UndeployModel]. - name (:class:`str`): + name (str): Required. Resource name of the model to undeploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1735,24 +1752,22 @@ def undeploy_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1822,20 +1837,22 @@ def export_model( completes. Args: - request (:class:`~.service.ExportModelRequest`): + request (google.cloud.automl_v1.types.ExportModelRequest): The request object. Request message for [AutoMl.ExportModel][google.cloud.automl.v1.AutoMl.ExportModel]. 
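Note: deploy_model and undeploy_model are also Empty-result operations. A sketch with a placeholder model name:

from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
model_name = "projects/PROJECT_ID/locations/us-central1/models/MODEL_ID"

client.deploy_model(name=model_name).result(timeout=1800)
# ... serve online predictions ...
client.undeploy_model(name=model_name).result(timeout=1800)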
Models need to be enabled for exporting, otherwise an error code will be returned. - name (:class:`str`): + name (str): Required. The resource name of the model to export. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.ModelExportOutputConfig`): + output_config (google.cloud.automl_v1.types.ModelExportOutputConfig): Required. The desired output location and configuration. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1847,24 +1864,22 @@ def export_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1928,12 +1943,13 @@ def get_model_evaluation( r"""Gets a model evaluation. Args: - request (:class:`~.service.GetModelEvaluationRequest`): + request (google.cloud.automl_v1.types.GetModelEvaluationRequest): The request object. Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1.AutoMl.GetModelEvaluation]. - name (:class:`str`): + name (str): Required. Resource name for the model evaluation. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1945,7 +1961,7 @@ def get_model_evaluation( sent along with the request as metadata. Returns: - ~.model_evaluation.ModelEvaluation: + google.cloud.automl_v1.types.ModelEvaluation: Evaluation results of a model. """ # Create or coerce a protobuf request object. @@ -2000,19 +2016,20 @@ def list_model_evaluations( r"""Lists model evaluations. Args: - request (:class:`~.service.ListModelEvaluationsRequest`): + request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): The request object. Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. - parent (:class:`str`): + parent (str): Required. Resource name of the model to list the model evaluations for. If modelId is set as "-", this will list model evaluations from across all models of the parent location. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - filter (:class:`str`): + filter (str): Required. An expression for filtering the results of the request. @@ -2026,6 +2043,7 @@ def list_model_evaluations( 4. - ``NOT annotation_spec_id:*`` --> The model evaluation was done for aggregate of all annotation specs. 
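Note: list_model_evaluations takes the required filter documented above. A sketch with placeholder resource names, using the documented annotation_spec_id:* form:

from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
evaluations = client.list_model_evaluations(
    parent="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
    filter="annotation_spec_id:*",  # evaluations done per annotation spec
)
for evaluation in evaluations:
    print(evaluation.name, evaluation.evaluated_example_count)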
+ This corresponds to the ``filter`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2037,7 +2055,7 @@ def list_model_evaluations( sent along with the request as metadata. Returns: - ~.pagers.ListModelEvaluationsPager: + google.cloud.automl_v1.services.auto_ml.pagers.ListModelEvaluationsPager: Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. diff --git a/google/cloud/automl_v1/services/auto_ml/pagers.py b/google/cloud/automl_v1/services/auto_ml/pagers.py index 4de690c7..73a0d958 100644 --- a/google/cloud/automl_v1/services/auto_ml/pagers.py +++ b/google/cloud/automl_v1/services/auto_ml/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.automl_v1.types import dataset from google.cloud.automl_v1.types import model @@ -27,7 +36,7 @@ class ListDatasetsPager: """A pager for iterating through ``list_datasets`` requests. This class thinly wraps an initial - :class:`~.service.ListDatasetsResponse` object, and + :class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and provides an ``__iter__`` method to iterate through its ``datasets`` field. @@ -36,7 +45,7 @@ class ListDatasetsPager: through the ``datasets`` field on the corresponding responses. - All the usual :class:`~.service.ListDatasetsResponse` + All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -54,9 +63,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListDatasetsRequest`): + request (google.cloud.automl_v1.types.ListDatasetsRequest): The initial request object. - response (:class:`~.service.ListDatasetsResponse`): + response (google.cloud.automl_v1.types.ListDatasetsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -89,7 +98,7 @@ class ListDatasetsAsyncPager: """A pager for iterating through ``list_datasets`` requests. This class thinly wraps an initial - :class:`~.service.ListDatasetsResponse` object, and + :class:`google.cloud.automl_v1.types.ListDatasetsResponse` object, and provides an ``__aiter__`` method to iterate through its ``datasets`` field. @@ -98,7 +107,7 @@ class ListDatasetsAsyncPager: through the ``datasets`` field on the corresponding responses. - All the usual :class:`~.service.ListDatasetsResponse` + All the usual :class:`google.cloud.automl_v1.types.ListDatasetsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -116,9 +125,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListDatasetsRequest`): + request (google.cloud.automl_v1.types.ListDatasetsRequest): The initial request object. - response (:class:`~.service.ListDatasetsResponse`): + response (google.cloud.automl_v1.types.ListDatasetsResponse): The initial response object. 
metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -155,7 +164,7 @@ class ListModelsPager: """A pager for iterating through ``list_models`` requests. This class thinly wraps an initial - :class:`~.service.ListModelsResponse` object, and + :class:`google.cloud.automl_v1.types.ListModelsResponse` object, and provides an ``__iter__`` method to iterate through its ``model`` field. @@ -164,7 +173,7 @@ class ListModelsPager: through the ``model`` field on the corresponding responses. - All the usual :class:`~.service.ListModelsResponse` + All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -182,9 +191,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelsRequest`): + request (google.cloud.automl_v1.types.ListModelsRequest): The initial request object. - response (:class:`~.service.ListModelsResponse`): + response (google.cloud.automl_v1.types.ListModelsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -217,7 +226,7 @@ class ListModelsAsyncPager: """A pager for iterating through ``list_models`` requests. This class thinly wraps an initial - :class:`~.service.ListModelsResponse` object, and + :class:`google.cloud.automl_v1.types.ListModelsResponse` object, and provides an ``__aiter__`` method to iterate through its ``model`` field. @@ -226,7 +235,7 @@ class ListModelsAsyncPager: through the ``model`` field on the corresponding responses. - All the usual :class:`~.service.ListModelsResponse` + All the usual :class:`google.cloud.automl_v1.types.ListModelsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -244,9 +253,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelsRequest`): + request (google.cloud.automl_v1.types.ListModelsRequest): The initial request object. - response (:class:`~.service.ListModelsResponse`): + response (google.cloud.automl_v1.types.ListModelsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -283,7 +292,7 @@ class ListModelEvaluationsPager: """A pager for iterating through ``list_model_evaluations`` requests. This class thinly wraps an initial - :class:`~.service.ListModelEvaluationsResponse` object, and + :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and provides an ``__iter__`` method to iterate through its ``model_evaluation`` field. @@ -292,7 +301,7 @@ class ListModelEvaluationsPager: through the ``model_evaluation`` field on the corresponding responses. - All the usual :class:`~.service.ListModelEvaluationsResponse` + All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -310,9 +319,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. 
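Note: the async pagers expose __aiter__, so the async client's list methods can be consumed with async for. A sketch with a placeholder parent:

import asyncio

from google.cloud.automl_v1.services.auto_ml import AutoMlAsyncClient


async def list_datasets():
    client = AutoMlAsyncClient()
    pager = await client.list_datasets(
        parent="projects/PROJECT_ID/locations/us-central1"
    )
    async for dataset in pager:  # pages are fetched transparently
        print(dataset.name)


asyncio.run(list_datasets())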
- request (:class:`~.service.ListModelEvaluationsRequest`): + request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): The initial request object. - response (:class:`~.service.ListModelEvaluationsResponse`): + response (google.cloud.automl_v1.types.ListModelEvaluationsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -345,7 +354,7 @@ class ListModelEvaluationsAsyncPager: """A pager for iterating through ``list_model_evaluations`` requests. This class thinly wraps an initial - :class:`~.service.ListModelEvaluationsResponse` object, and + :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` object, and provides an ``__aiter__`` method to iterate through its ``model_evaluation`` field. @@ -354,7 +363,7 @@ class ListModelEvaluationsAsyncPager: through the ``model_evaluation`` field on the corresponding responses. - All the usual :class:`~.service.ListModelEvaluationsResponse` + All the usual :class:`google.cloud.automl_v1.types.ListModelEvaluationsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -372,9 +381,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelEvaluationsRequest`): + request (google.cloud.automl_v1.types.ListModelEvaluationsRequest): The initial request object. - response (:class:`~.service.ListModelEvaluationsResponse`): + response (google.cloud.automl_v1.types.ListModelEvaluationsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/automl_v1/services/auto_ml/transports/base.py b/google/cloud/automl_v1/services/auto_ml/transports/base.py index 7deddee8..19b87223 100644 --- a/google/cloud/automl_v1/services/auto_ml/transports/base.py +++ b/google/cloud/automl_v1/services/auto_ml/transports/base.py @@ -75,10 +75,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -86,6 +86,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. 
if credentials and credentials_file: @@ -95,20 +98,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { @@ -124,6 +124,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -137,6 +138,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -153,6 +155,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -172,6 +175,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -188,6 +192,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -201,6 +206,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -214,6 +220,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -239,6 +246,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -252,6 +260,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py index 5f2f7d97..e2f36ade 100644 --- a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py +++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py @@ -79,6 +79,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -109,6 +110,10 @@ def __init__( 
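Note: the deadline=5.0 added to each wrapped method caps the total time spent retrying DeadlineExceeded/ServiceUnavailable errors. An equivalent per-call override would look roughly like this; the values and resource name are illustrative:

from google.api_core import exceptions
from google.api_core import retry as retries
from google.cloud import automl_v1

client = automl_v1.AutoMlClient()
custom_retry = retries.Retry(
    initial=0.1,
    maximum=60.0,
    multiplier=1.3,
    predicate=retries.if_exception_type(
        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable
    ),
    deadline=5.0,  # total retry budget, in seconds
)
dataset = client.get_dataset(
    name="projects/PROJECT_ID/locations/us-central1/datasets/DATASET_ID",
    retry=custom_retry,
    timeout=5.0,
)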
``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -123,72 +128,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. 
The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -196,18 +190,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -221,7 +205,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py index bcda8baf..46144f01 100644 --- a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py +++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py @@ -83,7 +83,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -123,6 +123,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -154,12 +155,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
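Note: because the base constructor now runs before channel creation, an explicitly supplied channel short-circuits credential and channel setup; only the method wrapping happens afterwards. A sketch of injecting a pre-built channel (the endpoint is a placeholder, e.g. a local proxy or emulator):

import grpc

from google.cloud import automl_v1
from google.cloud.automl_v1.services.auto_ml.transports.grpc import AutoMlGrpcTransport

channel = grpc.insecure_channel("localhost:8080")  # placeholder endpoint
transport = AutoMlGrpcTransport(channel=channel)
client = automl_v1.AutoMlClient(transport=transport)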
Raises: @@ -168,72 +173,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -241,18 +235,8 @@ def __init__( ], ) - # Run the base constructor. 
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/automl_v1/services/prediction_service/async_client.py b/google/cloud/automl_v1/services/prediction_service/async_client.py index 7f922fb3..f420199a 100644 --- a/google/cloud/automl_v1/services/prediction_service/async_client.py +++ b/google/cloud/automl_v1/services/prediction_service/async_client.py @@ -85,7 +85,36 @@ class PredictionServiceAsyncClient: PredictionServiceClient.parse_common_location_path ) - from_service_account_file = PredictionServiceClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -199,24 +228,26 @@ async def predict( up to 5MB. Not available for FORECASTING ``prediction_type``. Args: - request (:class:`~.prediction_service.PredictRequest`): + request (:class:`google.cloud.automl_v1.types.PredictRequest`): The request object. Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. name (:class:`str`): Required. Name of the model requested to serve the prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - payload (:class:`~.data_items.ExamplePayload`): + payload (:class:`google.cloud.automl_v1.types.ExamplePayload`): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. + This corresponds to the ``payload`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`): + params (:class:`Sequence[google.cloud.automl_v1.types.PredictRequest.ParamsEntry]`): Additional domain-specific parameters, any string must be up to 25000 characters long. @@ -247,6 +278,7 @@ async def predict( is populated in the returned list of [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] objects. The default is false. 
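Note: from_service_account_info complements from_service_account_file and accepts already-parsed key material; the async variants delegate to the sync classmethods. A sketch with a placeholder key file:

import json

from google.cloud import automl_v1
from google.cloud.automl_v1.services.prediction_service import (
    PredictionServiceAsyncClient,
)

# From a key file on disk.
client = automl_v1.PredictionServiceClient.from_service_account_file(
    "service-account.json"
)

# From key material loaded elsewhere (e.g. a secret manager).
with open("service-account.json") as f:
    info = json.load(f)
async_client = PredictionServiceAsyncClient.from_service_account_info(info)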
+ This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -258,7 +290,7 @@ async def predict( sent along with the request as metadata. Returns: - ~.prediction_service.PredictResponse: + google.cloud.automl_v1.types.PredictResponse: Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. @@ -340,29 +372,32 @@ async def batch_predict( - AutoML Tables Args: - request (:class:`~.prediction_service.BatchPredictRequest`): + request (:class:`google.cloud.automl_v1.types.BatchPredictRequest`): The request object. Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. name (:class:`str`): Required. Name of the model requested to serve the batch prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.BatchPredictInputConfig`): + input_config (:class:`google.cloud.automl_v1.types.BatchPredictInputConfig`): Required. The input configuration for batch prediction. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.BatchPredictOutputConfig`): + output_config (:class:`google.cloud.automl_v1.types.BatchPredictOutputConfig`): Required. The Configuration specifying where output predictions should be written. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`): + params (:class:`Sequence[google.cloud.automl_v1.types.BatchPredictRequest.ParamsEntry]`): Additional domain-specific parameters for the predictions, any string must be up to 25000 characters long. @@ -447,6 +482,7 @@ async def batch_predict( with shortest edge at least that long as a relative value of video frame size are returned. Value in 0 to 1 range. Default is 0. + This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -458,15 +494,13 @@ async def batch_predict( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.prediction_service.BatchPredictResult``: - Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of the - operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + The result type for the operation will be :class:`google.cloud.automl_v1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. """ # Create or coerce a protobuf request object. 
diff --git a/google/cloud/automl_v1/services/prediction_service/client.py b/google/cloud/automl_v1/services/prediction_service/client.py index a56b5a30..5f5fc44b 100644 --- a/google/cloud/automl_v1/services/prediction_service/client.py +++ b/google/cloud/automl_v1/services/prediction_service/client.py @@ -121,6 +121,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -133,7 +149,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + PredictionServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -241,10 +257,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The + transport (Union[str, PredictionServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -280,21 +296,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -337,7 +349,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -392,24 +404,26 @@ def predict( up to 5MB. Not available for FORECASTING ``prediction_type``. 
Args: - request (:class:`~.prediction_service.PredictRequest`): + request (google.cloud.automl_v1.types.PredictRequest): The request object. Request message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. - name (:class:`str`): + name (str): Required. Name of the model requested to serve the prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - payload (:class:`~.data_items.ExamplePayload`): + payload (google.cloud.automl_v1.types.ExamplePayload): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. + This corresponds to the ``payload`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`): + params (Sequence[google.cloud.automl_v1.types.PredictRequest.ParamsEntry]): Additional domain-specific parameters, any string must be up to 25000 characters long. @@ -440,6 +454,7 @@ def predict( is populated in the returned list of [TablesAnnotation][google.cloud.automl.v1.TablesAnnotation] objects. The default is false. + This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -451,7 +466,7 @@ def predict( sent along with the request as metadata. Returns: - ~.prediction_service.PredictResponse: + google.cloud.automl_v1.types.PredictResponse: Response message for [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. @@ -480,9 +495,8 @@ def predict( request.name = name if payload is not None: request.payload = payload - - if params: - request.params.update(params) + if params is not None: + request.params = params # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -534,29 +548,32 @@ def batch_predict( - AutoML Tables Args: - request (:class:`~.prediction_service.BatchPredictRequest`): + request (google.cloud.automl_v1.types.BatchPredictRequest): The request object. Request message for [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. - name (:class:`str`): + name (str): Required. Name of the model requested to serve the batch prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.BatchPredictInputConfig`): + input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): Required. The input configuration for batch prediction. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.BatchPredictOutputConfig`): + output_config (google.cloud.automl_v1.types.BatchPredictOutputConfig): Required. The Configuration specifying where output predictions should be written. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`): + params (Sequence[google.cloud.automl_v1.types.BatchPredictRequest.ParamsEntry]): Additional domain-specific parameters for the predictions, any string must be up to 25000 characters long. @@ -641,6 +658,7 @@ def batch_predict( with shortest edge at least that long as a relative value of video frame size are returned. Value in 0 to 1 range. 
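Note: with the flattened params now assigned to the request wholesale (request.params = params) rather than merged via update(), a plain dict still works. A sketch for an image classification model; score_threshold is the commonly documented parameter and is an assumption here, as are the resource names:

from google.cloud import automl_v1

client = automl_v1.PredictionServiceClient()
with open("image.jpg", "rb") as f:
    payload = automl_v1.ExamplePayload(image=automl_v1.Image(image_bytes=f.read()))

response = client.predict(
    name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
    payload=payload,
    params={"score_threshold": "0.5"},  # param values are strings
)
for result in response.payload:
    print(result.display_name, result.classification.score)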
Default is 0. + This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -652,15 +670,13 @@ def batch_predict( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.prediction_service.BatchPredictResult``: - Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of the - operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. + The result type for the operation will be :class:`google.cloud.automl_v1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. """ # Create or coerce a protobuf request object. @@ -689,9 +705,8 @@ def batch_predict( request.input_config = input_config if output_config is not None: request.output_config = output_config - - if params: - request.params.update(params) + if params is not None: + request.params = params # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/automl_v1/services/prediction_service/transports/base.py b/google/cloud/automl_v1/services/prediction_service/transports/base.py index f019a8dc..148e1307 100644 --- a/google/cloud/automl_v1/services/prediction_service/transports/base.py +++ b/google/cloud/automl_v1/services/prediction_service/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -80,6 +80,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -89,20 +92,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
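Note: batch_predict is the same Operation pattern with a google.cloud.automl_v1.types.BatchPredictResult result. A sketch with placeholder GCS URIs and model name:

from google.cloud import automl_v1

client = automl_v1.PredictionServiceClient()
input_config = automl_v1.BatchPredictInputConfig(
    gcs_source=automl_v1.GcsSource(input_uris=["gs://example-bucket/inputs.csv"])
)
output_config = automl_v1.BatchPredictOutputConfig(
    gcs_destination=automl_v1.GcsDestination(
        output_uri_prefix="gs://example-bucket/results/"
    )
)
operation = client.batch_predict(
    name="projects/PROJECT_ID/locations/us-central1/models/MODEL_ID",
    input_config=input_config,
    output_config=output_config,
)
result = operation.result(timeout=7200)  # BatchPredictResult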
self._wrapped_methods = { diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py index ca3220d3..cc2f2a2e 100644 --- a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py +++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py @@ -62,6 +62,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -92,6 +93,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -106,72 +111,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -179,18 +173,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -204,7 +188,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py index e23a8c65..70f8ddf7 100644 --- a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py @@ -66,7 +66,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -106,6 +106,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -137,12 +138,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -151,72 +156,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. 
- self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -224,18 +218,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/automl_v1/types/__init__.py b/google/cloud/automl_v1/types/__init__.py index a95d320a..3d40bbce 100644 --- a/google/cloud/automl_v1/types/__init__.py +++ b/google/cloud/automl_v1/types/__init__.py @@ -15,196 +15,196 @@ # limitations under the License. 
# +from .annotation_payload import AnnotationPayload +from .annotation_spec import AnnotationSpec from .classification import ( ClassificationAnnotation, ClassificationEvaluationMetrics, ClassificationType, ) -from .geometry import ( - NormalizedVertex, - BoundingPoly, +from .data_items import ( + Document, + DocumentDimensions, + ExamplePayload, + Image, + TextSnippet, ) +from .dataset import Dataset from .detection import ( - ImageObjectDetectionAnnotation, BoundingBoxMetricsEntry, + ImageObjectDetectionAnnotation, ImageObjectDetectionEvaluationMetrics, ) -from .text_segment import TextSegment -from .text_extraction import ( - TextExtractionAnnotation, - TextExtractionEvaluationMetrics, -) -from .text_sentiment import ( - TextSentimentAnnotation, - TextSentimentEvaluationMetrics, -) -from .io import ( - InputConfig, - BatchPredictInputConfig, - DocumentInputConfig, - OutputConfig, - BatchPredictOutputConfig, - ModelExportOutputConfig, - GcsSource, - GcsDestination, -) -from .data_items import ( - Image, - TextSnippet, - DocumentDimensions, - Document, - ExamplePayload, -) -from .translation import ( - TranslationDatasetMetadata, - TranslationEvaluationMetrics, - TranslationModelMetadata, - TranslationAnnotation, +from .geometry import ( + BoundingPoly, + NormalizedVertex, ) -from .annotation_payload import AnnotationPayload -from .annotation_spec import AnnotationSpec from .image import ( ImageClassificationDatasetMetadata, - ImageObjectDetectionDatasetMetadata, - ImageClassificationModelMetadata, - ImageObjectDetectionModelMetadata, ImageClassificationModelDeploymentMetadata, + ImageClassificationModelMetadata, + ImageObjectDetectionDatasetMetadata, ImageObjectDetectionModelDeploymentMetadata, + ImageObjectDetectionModelMetadata, ) -from .text import ( - TextClassificationDatasetMetadata, - TextClassificationModelMetadata, - TextExtractionDatasetMetadata, - TextExtractionModelMetadata, - TextSentimentDatasetMetadata, - TextSentimentModelMetadata, +from .io import ( + BatchPredictInputConfig, + BatchPredictOutputConfig, + DocumentInputConfig, + GcsDestination, + GcsSource, + InputConfig, + ModelExportOutputConfig, + OutputConfig, ) -from .dataset import Dataset from .model import Model from .model_evaluation import ModelEvaluation from .operations import ( - OperationMetadata, - DeleteOperationMetadata, - DeployModelOperationMetadata, - UndeployModelOperationMetadata, + BatchPredictOperationMetadata, CreateDatasetOperationMetadata, CreateModelOperationMetadata, - ImportDataOperationMetadata, + DeleteOperationMetadata, + DeployModelOperationMetadata, ExportDataOperationMetadata, - BatchPredictOperationMetadata, ExportModelOperationMetadata, + ImportDataOperationMetadata, + OperationMetadata, + UndeployModelOperationMetadata, ) from .prediction_service import ( - PredictRequest, - PredictResponse, BatchPredictRequest, BatchPredictResult, + PredictRequest, + PredictResponse, ) from .service import ( CreateDatasetRequest, - GetDatasetRequest, - ListDatasetsRequest, - ListDatasetsResponse, - UpdateDatasetRequest, - DeleteDatasetRequest, - ImportDataRequest, - ExportDataRequest, - GetAnnotationSpecRequest, CreateModelRequest, - GetModelRequest, - ListModelsRequest, - ListModelsResponse, + DeleteDatasetRequest, DeleteModelRequest, - UpdateModelRequest, DeployModelRequest, - UndeployModelRequest, + ExportDataRequest, ExportModelRequest, + GetAnnotationSpecRequest, + GetDatasetRequest, GetModelEvaluationRequest, + GetModelRequest, + ImportDataRequest, + ListDatasetsRequest, + 
ListDatasetsResponse, ListModelEvaluationsRequest, ListModelEvaluationsResponse, + ListModelsRequest, + ListModelsResponse, + UndeployModelRequest, + UpdateDatasetRequest, + UpdateModelRequest, +) +from .text import ( + TextClassificationDatasetMetadata, + TextClassificationModelMetadata, + TextExtractionDatasetMetadata, + TextExtractionModelMetadata, + TextSentimentDatasetMetadata, + TextSentimentModelMetadata, +) +from .text_extraction import ( + TextExtractionAnnotation, + TextExtractionEvaluationMetrics, +) +from .text_segment import TextSegment +from .text_sentiment import ( + TextSentimentAnnotation, + TextSentimentEvaluationMetrics, +) +from .translation import ( + TranslationAnnotation, + TranslationDatasetMetadata, + TranslationEvaluationMetrics, + TranslationModelMetadata, ) __all__ = ( + "AnnotationPayload", + "AnnotationSpec", "ClassificationAnnotation", "ClassificationEvaluationMetrics", "ClassificationType", - "NormalizedVertex", - "BoundingPoly", - "ImageObjectDetectionAnnotation", - "BoundingBoxMetricsEntry", - "ImageObjectDetectionEvaluationMetrics", - "TextSegment", - "TextExtractionAnnotation", - "TextExtractionEvaluationMetrics", - "TextSentimentAnnotation", - "TextSentimentEvaluationMetrics", - "InputConfig", - "BatchPredictInputConfig", - "DocumentInputConfig", - "OutputConfig", - "BatchPredictOutputConfig", - "ModelExportOutputConfig", - "GcsSource", - "GcsDestination", - "Image", - "TextSnippet", - "DocumentDimensions", "Document", + "DocumentDimensions", "ExamplePayload", - "TranslationDatasetMetadata", - "TranslationEvaluationMetrics", - "TranslationModelMetadata", - "TranslationAnnotation", - "AnnotationPayload", - "AnnotationSpec", + "Image", + "TextSnippet", + "Dataset", + "BoundingBoxMetricsEntry", + "ImageObjectDetectionAnnotation", + "ImageObjectDetectionEvaluationMetrics", + "BoundingPoly", + "NormalizedVertex", "ImageClassificationDatasetMetadata", - "ImageObjectDetectionDatasetMetadata", - "ImageClassificationModelMetadata", - "ImageObjectDetectionModelMetadata", "ImageClassificationModelDeploymentMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionDatasetMetadata", "ImageObjectDetectionModelDeploymentMetadata", - "TextClassificationDatasetMetadata", - "TextClassificationModelMetadata", - "TextExtractionDatasetMetadata", - "TextExtractionModelMetadata", - "TextSentimentDatasetMetadata", - "TextSentimentModelMetadata", - "Dataset", + "ImageObjectDetectionModelMetadata", + "BatchPredictInputConfig", + "BatchPredictOutputConfig", + "DocumentInputConfig", + "GcsDestination", + "GcsSource", + "InputConfig", + "ModelExportOutputConfig", + "OutputConfig", "Model", "ModelEvaluation", - "OperationMetadata", - "DeleteOperationMetadata", - "DeployModelOperationMetadata", - "UndeployModelOperationMetadata", + "BatchPredictOperationMetadata", "CreateDatasetOperationMetadata", "CreateModelOperationMetadata", - "ImportDataOperationMetadata", + "DeleteOperationMetadata", + "DeployModelOperationMetadata", "ExportDataOperationMetadata", - "BatchPredictOperationMetadata", "ExportModelOperationMetadata", - "PredictRequest", - "PredictResponse", + "ImportDataOperationMetadata", + "OperationMetadata", + "UndeployModelOperationMetadata", "BatchPredictRequest", "BatchPredictResult", + "PredictRequest", + "PredictResponse", "CreateDatasetRequest", - "GetDatasetRequest", - "ListDatasetsRequest", - "ListDatasetsResponse", - "UpdateDatasetRequest", - "DeleteDatasetRequest", - "ImportDataRequest", - "ExportDataRequest", - "GetAnnotationSpecRequest", 
"CreateModelRequest", - "GetModelRequest", - "ListModelsRequest", - "ListModelsResponse", + "DeleteDatasetRequest", "DeleteModelRequest", - "UpdateModelRequest", "DeployModelRequest", - "UndeployModelRequest", + "ExportDataRequest", "ExportModelRequest", + "GetAnnotationSpecRequest", + "GetDatasetRequest", "GetModelEvaluationRequest", + "GetModelRequest", + "ImportDataRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", "ListModelEvaluationsRequest", "ListModelEvaluationsResponse", + "ListModelsRequest", + "ListModelsResponse", + "UndeployModelRequest", + "UpdateDatasetRequest", + "UpdateModelRequest", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionDatasetMetadata", + "TextExtractionModelMetadata", + "TextSentimentDatasetMetadata", + "TextSentimentModelMetadata", + "TextExtractionAnnotation", + "TextExtractionEvaluationMetrics", + "TextSegment", + "TextSentimentAnnotation", + "TextSentimentEvaluationMetrics", + "TranslationAnnotation", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", ) diff --git a/google/cloud/automl_v1/types/annotation_payload.py b/google/cloud/automl_v1/types/annotation_payload.py index 911c6e42..d3106429 100644 --- a/google/cloud/automl_v1/types/annotation_payload.py +++ b/google/cloud/automl_v1/types/annotation_payload.py @@ -34,17 +34,17 @@ class AnnotationPayload(proto.Message): r"""Contains annotation information that is relevant to AutoML. Attributes: - translation (~.gca_translation.TranslationAnnotation): + translation (google.cloud.automl_v1.types.TranslationAnnotation): Annotation details for translation. - classification (~.gca_classification.ClassificationAnnotation): + classification (google.cloud.automl_v1.types.ClassificationAnnotation): Annotation details for content or image classification. - image_object_detection (~.detection.ImageObjectDetectionAnnotation): + image_object_detection (google.cloud.automl_v1.types.ImageObjectDetectionAnnotation): Annotation details for image object detection. - text_extraction (~.gca_text_extraction.TextExtractionAnnotation): + text_extraction (google.cloud.automl_v1.types.TextExtractionAnnotation): Annotation details for text extraction. - text_sentiment (~.gca_text_sentiment.TextSentimentAnnotation): + text_sentiment (google.cloud.automl_v1.types.TextSentimentAnnotation): Annotation details for text sentiment. annotation_spec_id (str): Output only . The resource ID of the diff --git a/google/cloud/automl_v1/types/classification.py b/google/cloud/automl_v1/types/classification.py index c925f206..873c7501 100644 --- a/google/cloud/automl_v1/types/classification.py +++ b/google/cloud/automl_v1/types/classification.py @@ -68,7 +68,7 @@ class ClassificationEvaluationMetrics(proto.Message): averaged for the overall evaluation. log_loss (float): Output only. The Log Loss metric. - confidence_metrics_entry (Sequence[~.classification.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): + confidence_metrics_entry (Sequence[google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): Output only. Metrics for each confidence_threshold in 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and position_threshold = INT32_MAX_VALUE. ROC and @@ -76,7 +76,7 @@ class ClassificationEvaluationMetrics(proto.Message): derived from them. The confidence metrics entries may also be supplied for additional values of position_threshold, but from these no aggregated metrics are computed. 
- confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix): + confusion_matrix (google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix): Output only. Confusion matrix of the evaluation. Only set for MULTICLASS classification problems where number of labels @@ -196,7 +196,7 @@ class ConfusionMatrix(proto.Message): [prediction_type-s][google.cloud.automl.v1p1beta.TablesModelMetadata.prediction_type], distinct values of the target column at the moment of the model evaluation are populated here. - row (Sequence[~.classification.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): + row (Sequence[google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): Output only. Rows in the confusion matrix. The number of rows is equal to the size of ``annotation_spec_id``. ``row[i].example_count[j]`` is the number of examples that diff --git a/google/cloud/automl_v1/types/data_items.py b/google/cloud/automl_v1/types/data_items.py index 554ed762..7b292c86 100644 --- a/google/cloud/automl_v1/types/data_items.py +++ b/google/cloud/automl_v1/types/data_items.py @@ -83,7 +83,7 @@ class DocumentDimensions(proto.Message): r"""Message that describes dimension of a document. Attributes: - unit (~.data_items.DocumentDimensions.DocumentDimensionUnit): + unit (google.cloud.automl_v1.types.DocumentDimensions.DocumentDimensionUnit): Unit of the dimension. width (float): Width value of the document, works together @@ -111,15 +111,15 @@ class Document(proto.Message): r"""A structured text document e.g. a PDF. Attributes: - input_config (~.io.DocumentInputConfig): + input_config (google.cloud.automl_v1.types.DocumentInputConfig): An input config specifying the content of the document. - document_text (~.data_items.TextSnippet): + document_text (google.cloud.automl_v1.types.TextSnippet): The plain text version of this document. - layout (Sequence[~.data_items.Document.Layout]): + layout (Sequence[google.cloud.automl_v1.types.Document.Layout]): Describes the layout of the document. Sorted by [page_number][]. - document_dimensions (~.data_items.DocumentDimensions): + document_dimensions (google.cloud.automl_v1.types.DocumentDimensions): The dimensions of the page in the document. page_count (int): Number of pages in the document. @@ -131,14 +131,14 @@ class Layout(proto.Message): in the document. Attributes: - text_segment (~.gca_text_segment.TextSegment): + text_segment (google.cloud.automl_v1.types.TextSegment): Text Segment that represents a segment in [document_text][google.cloud.automl.v1p1beta.Document.document_text]. page_number (int): Page number of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the original document, starts from 1. - bounding_poly (~.geometry.BoundingPoly): + bounding_poly (google.cloud.automl_v1.types.BoundingPoly): The position of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in the page. Contains exactly 4 @@ -149,7 +149,7 @@ class Layout(proto.Message): [NormalizedVertex-s][google.cloud.automl.v1p1beta.NormalizedVertex] are relative to the page. Coordinates are based on top-left as point (0,0). - text_segment_type (~.data_items.Document.Layout.TextSegmentType): + text_segment_type (google.cloud.automl_v1.types.Document.Layout.TextSegmentType): The type of the [text_segment][google.cloud.automl.v1.Document.Layout.text_segment] in document. @@ -201,11 +201,11 @@ class ExamplePayload(proto.Message): r"""Example data used for training or prediction. 
Attributes: - image (~.data_items.Image): + image (google.cloud.automl_v1.types.Image): Example image. - text_snippet (~.data_items.TextSnippet): + text_snippet (google.cloud.automl_v1.types.TextSnippet): Example text. - document (~.data_items.Document): + document (google.cloud.automl_v1.types.Document): Example document. """ diff --git a/google/cloud/automl_v1/types/dataset.py b/google/cloud/automl_v1/types/dataset.py index c085c3e6..1cd45c39 100644 --- a/google/cloud/automl_v1/types/dataset.py +++ b/google/cloud/automl_v1/types/dataset.py @@ -33,21 +33,21 @@ class Dataset(proto.Message): annotated. Attributes: - translation_dataset_metadata (~.translation.TranslationDatasetMetadata): + translation_dataset_metadata (google.cloud.automl_v1.types.TranslationDatasetMetadata): Metadata for a dataset used for translation. - image_classification_dataset_metadata (~.image.ImageClassificationDatasetMetadata): + image_classification_dataset_metadata (google.cloud.automl_v1.types.ImageClassificationDatasetMetadata): Metadata for a dataset used for image classification. - text_classification_dataset_metadata (~.text.TextClassificationDatasetMetadata): + text_classification_dataset_metadata (google.cloud.automl_v1.types.TextClassificationDatasetMetadata): Metadata for a dataset used for text classification. - image_object_detection_dataset_metadata (~.image.ImageObjectDetectionDatasetMetadata): + image_object_detection_dataset_metadata (google.cloud.automl_v1.types.ImageObjectDetectionDatasetMetadata): Metadata for a dataset used for image object detection. - text_extraction_dataset_metadata (~.text.TextExtractionDatasetMetadata): + text_extraction_dataset_metadata (google.cloud.automl_v1.types.TextExtractionDatasetMetadata): Metadata for a dataset used for text extraction. - text_sentiment_dataset_metadata (~.text.TextSentimentDatasetMetadata): + text_sentiment_dataset_metadata (google.cloud.automl_v1.types.TextSentimentDatasetMetadata): Metadata for a dataset used for text sentiment. name (str): @@ -64,14 +64,14 @@ class Dataset(proto.Message): example_count (int): Output only. The number of examples in the dataset. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this dataset was created. etag (str): Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. - labels (Sequence[~.dataset.Dataset.LabelsEntry]): + labels (Sequence[google.cloud.automl_v1.types.Dataset.LabelsEntry]): Optional. The labels with user-defined metadata to organize your dataset. Label keys and values can be no longer than 64 diff --git a/google/cloud/automl_v1/types/detection.py b/google/cloud/automl_v1/types/detection.py index 69ca1d54..85510cd2 100644 --- a/google/cloud/automl_v1/types/detection.py +++ b/google/cloud/automl_v1/types/detection.py @@ -35,7 +35,7 @@ class ImageObjectDetectionAnnotation(proto.Message): r"""Annotation details for image object detection. Attributes: - bounding_box (~.geometry.BoundingPoly): + bounding_box (google.cloud.automl_v1.types.BoundingPoly): Output only. The rectangle representing the object location. score (float): @@ -62,7 +62,7 @@ class BoundingBoxMetricsEntry(proto.Message): mean_average_precision (float): Output only. The mean average precision, most often close to au_prc. 
- confidence_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): + confidence_metrics_entries (Sequence[google.cloud.automl_v1.types.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): Output only. Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall @@ -113,7 +113,7 @@ class ImageObjectDetectionEvaluationMetrics(proto.Message): Output only. The total number of bounding boxes (i.e. summed over all images) the ground truth used to create this evaluation had. - bounding_box_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry]): + bounding_box_metrics_entries (Sequence[google.cloud.automl_v1.types.BoundingBoxMetricsEntry]): Output only. The bounding boxes match metrics for each Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each diff --git a/google/cloud/automl_v1/types/geometry.py b/google/cloud/automl_v1/types/geometry.py index 07c22dd9..80b73c89 100644 --- a/google/cloud/automl_v1/types/geometry.py +++ b/google/cloud/automl_v1/types/geometry.py @@ -49,7 +49,7 @@ class BoundingPoly(proto.Message): by connecting vertices in the order they are listed. Attributes: - normalized_vertices (Sequence[~.geometry.NormalizedVertex]): + normalized_vertices (Sequence[google.cloud.automl_v1.types.NormalizedVertex]): Output only . The bounding polygon normalized vertices. """ diff --git a/google/cloud/automl_v1/types/image.py b/google/cloud/automl_v1/types/image.py index d025fdc9..80846e42 100644 --- a/google/cloud/automl_v1/types/image.py +++ b/google/cloud/automl_v1/types/image.py @@ -38,7 +38,7 @@ class ImageClassificationDatasetMetadata(proto.Message): r"""Dataset metadata that is specific to image classification. Attributes: - classification_type (~.classification.ClassificationType): + classification_type (google.cloud.automl_v1.types.ClassificationType): Required. Type of the classification problem. """ diff --git a/google/cloud/automl_v1/types/io.py b/google/cloud/automl_v1/types/io.py index 34eee669..6fad8152 100644 --- a/google/cloud/automl_v1/types/io.py +++ b/google/cloud/automl_v1/types/io.py @@ -714,13 +714,13 @@ class InputConfig(proto.Message): in Operation.metadata.partial_failures. Attributes: - gcs_source (~.io.GcsSource): + gcs_source (google.cloud.automl_v1.types.GcsSource): The Google Cloud Storage location for the input content. For [AutoMl.ImportData][google.cloud.automl.v1.AutoMl.ImportData], ``gcs_source`` points to a CSV file with a structure described in [InputConfig][google.cloud.automl.v1.InputConfig]. - params (Sequence[~.io.InputConfig.ParamsEntry]): + params (Sequence[google.cloud.automl_v1.types.InputConfig.ParamsEntry]): Additional domain-specific parameters describing the semantic of the imported data, any string must be up to 25000 characters long. @@ -1075,7 +1075,7 @@ class BatchPredictInputConfig(proto.Message): listed in Operation.metadata.partial_failures. Attributes: - gcs_source (~.io.GcsSource): + gcs_source (google.cloud.automl_v1.types.GcsSource): Required. The Google Cloud Storage location for the input content. """ @@ -1090,7 +1090,7 @@ class DocumentInputConfig(proto.Message): [Document][google.cloud.automl.v1.Document]. Attributes: - gcs_source (~.io.GcsSource): + gcs_source (google.cloud.automl_v1.types.GcsSource): The Google Cloud Storage location of the document file. Only a single path should be given. @@ -1131,7 +1131,7 @@ class OutputConfig(proto.Message): filled with precisely the same data as this obtained on import. 
Attributes: - gcs_destination (~.io.GcsDestination): + gcs_destination (google.cloud.automl_v1.types.GcsDestination): Required. The Google Cloud Storage location where the output is to be written to. For Image Object Detection, Text Extraction, Video Classification and Tables, in the given @@ -1436,7 +1436,7 @@ class BatchPredictOutputConfig(proto.Message): ``message``. Attributes: - gcs_destination (~.io.GcsDestination): + gcs_destination (google.cloud.automl_v1.types.GcsDestination): Required. The Google Cloud Storage location of the directory where the output is to be written to. @@ -1451,7 +1451,7 @@ class ModelExportOutputConfig(proto.Message): r"""Output configuration for ModelExport Action. Attributes: - gcs_destination (~.io.GcsDestination): + gcs_destination (google.cloud.automl_v1.types.GcsDestination): Required. The Google Cloud Storage location where the model is to be written to. This location may only be set for the following model formats: "tflite", "edgetpu_tflite", @@ -1503,7 +1503,7 @@ class ModelExportOutputConfig(proto.Message): //cloud.google.com/vision/automl/docs/containers-gcs-quickstart) - core_ml - Used for iOS mobile devices. - params (Sequence[~.io.ModelExportOutputConfig.ParamsEntry]): + params (Sequence[google.cloud.automl_v1.types.ModelExportOutputConfig.ParamsEntry]): Additional model-type and format specific parameters describing the requirements for the to be exported model files, any string must be up to 25000 characters long. diff --git a/google/cloud/automl_v1/types/model.py b/google/cloud/automl_v1/types/model.py index 14664ae2..6c7a9392 100644 --- a/google/cloud/automl_v1/types/model.py +++ b/google/cloud/automl_v1/types/model.py @@ -31,17 +31,17 @@ class Model(proto.Message): r"""API proto representing a trained machine learning model. Attributes: - translation_model_metadata (~.translation.TranslationModelMetadata): + translation_model_metadata (google.cloud.automl_v1.types.TranslationModelMetadata): Metadata for translation models. - image_classification_model_metadata (~.image.ImageClassificationModelMetadata): + image_classification_model_metadata (google.cloud.automl_v1.types.ImageClassificationModelMetadata): Metadata for image classification models. - text_classification_model_metadata (~.text.TextClassificationModelMetadata): + text_classification_model_metadata (google.cloud.automl_v1.types.TextClassificationModelMetadata): Metadata for text classification models. - image_object_detection_model_metadata (~.image.ImageObjectDetectionModelMetadata): + image_object_detection_model_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelMetadata): Metadata for image object detection models. - text_extraction_model_metadata (~.text.TextExtractionModelMetadata): + text_extraction_model_metadata (google.cloud.automl_v1.types.TextExtractionModelMetadata): Metadata for text extraction models. - text_sentiment_model_metadata (~.text.TextSentimentModelMetadata): + text_sentiment_model_metadata (google.cloud.automl_v1.types.TextSentimentModelMetadata): Metadata for text sentiment models. name (str): Output only. Resource name of the model. Format: @@ -55,14 +55,14 @@ class Model(proto.Message): Required. The resource ID of the dataset used to create the model. The dataset must come from the same ancestor project and location. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when the model training finished and can be used for prediction. 
- update_time (~.timestamp.Timestamp): + update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this model was last updated. - deployment_state (~.model.Model.DeploymentState): + deployment_state (google.cloud.automl_v1.types.Model.DeploymentState): Output only. Deployment state of the model. A model can only serve prediction requests after it gets deployed. @@ -70,7 +70,7 @@ class Model(proto.Message): Used to perform a consistent read-modify- rite updates. If not set, a blind "overwrite" update happens. - labels (Sequence[~.model.Model.LabelsEntry]): + labels (Sequence[google.cloud.automl_v1.types.Model.LabelsEntry]): Optional. The labels with user-defined metadata to organize your model. Label keys and values can be no longer than 64 diff --git a/google/cloud/automl_v1/types/model_evaluation.py b/google/cloud/automl_v1/types/model_evaluation.py index 39736edc..51a5cfe2 100644 --- a/google/cloud/automl_v1/types/model_evaluation.py +++ b/google/cloud/automl_v1/types/model_evaluation.py @@ -35,19 +35,19 @@ class ModelEvaluation(proto.Message): r"""Evaluation results of a model. Attributes: - classification_evaluation_metrics (~.classification.ClassificationEvaluationMetrics): + classification_evaluation_metrics (google.cloud.automl_v1.types.ClassificationEvaluationMetrics): Model evaluation metrics for image, text, video and tables classification. Tables problem is considered a classification when the target column is CATEGORY DataType. - translation_evaluation_metrics (~.translation.TranslationEvaluationMetrics): + translation_evaluation_metrics (google.cloud.automl_v1.types.TranslationEvaluationMetrics): Model evaluation metrics for translation. - image_object_detection_evaluation_metrics (~.detection.ImageObjectDetectionEvaluationMetrics): + image_object_detection_evaluation_metrics (google.cloud.automl_v1.types.ImageObjectDetectionEvaluationMetrics): Model evaluation metrics for image object detection. - text_sentiment_evaluation_metrics (~.text_sentiment.TextSentimentEvaluationMetrics): + text_sentiment_evaluation_metrics (google.cloud.automl_v1.types.TextSentimentEvaluationMetrics): Evaluation metrics for text sentiment models. - text_extraction_evaluation_metrics (~.text_extraction.TextExtractionEvaluationMetrics): + text_extraction_evaluation_metrics (google.cloud.automl_v1.types.TextExtractionEvaluationMetrics): Evaluation metrics for text extraction models. name (str): @@ -78,7 +78,7 @@ class ModelEvaluation(proto.Message): distinct values of the target column at the moment of the model evaluation are populated here. The display_name is empty for the overall model evaluation. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this model evaluation was created. evaluated_example_count (int): diff --git a/google/cloud/automl_v1/types/operations.py b/google/cloud/automl_v1/types/operations.py index e3ba76bd..d6aeee12 100644 --- a/google/cloud/automl_v1/types/operations.py +++ b/google/cloud/automl_v1/types/operations.py @@ -45,37 +45,37 @@ class OperationMetadata(proto.Message): AutoML API. Attributes: - delete_details (~.operations.DeleteOperationMetadata): + delete_details (google.cloud.automl_v1.types.DeleteOperationMetadata): Details of a Delete operation. - deploy_model_details (~.operations.DeployModelOperationMetadata): + deploy_model_details (google.cloud.automl_v1.types.DeployModelOperationMetadata): Details of a DeployModel operation. 
- undeploy_model_details (~.operations.UndeployModelOperationMetadata): + undeploy_model_details (google.cloud.automl_v1.types.UndeployModelOperationMetadata): Details of an UndeployModel operation. - create_model_details (~.operations.CreateModelOperationMetadata): + create_model_details (google.cloud.automl_v1.types.CreateModelOperationMetadata): Details of CreateModel operation. - create_dataset_details (~.operations.CreateDatasetOperationMetadata): + create_dataset_details (google.cloud.automl_v1.types.CreateDatasetOperationMetadata): Details of CreateDataset operation. - import_data_details (~.operations.ImportDataOperationMetadata): + import_data_details (google.cloud.automl_v1.types.ImportDataOperationMetadata): Details of ImportData operation. - batch_predict_details (~.operations.BatchPredictOperationMetadata): + batch_predict_details (google.cloud.automl_v1.types.BatchPredictOperationMetadata): Details of BatchPredict operation. - export_data_details (~.operations.ExportDataOperationMetadata): + export_data_details (google.cloud.automl_v1.types.ExportDataOperationMetadata): Details of ExportData operation. - export_model_details (~.operations.ExportModelOperationMetadata): + export_model_details (google.cloud.automl_v1.types.ExportModelOperationMetadata): Details of ExportModel operation. progress_percent (int): Output only. Progress of operation. Range: [0, 100]. Not used currently. - partial_failures (Sequence[~.status.Status]): + partial_failures (Sequence[google.rpc.status_pb2.Status]): Output only. Partial failures encountered. E.g. single files that couldn't be read. This field should never exceed 20 entries. Status details field will contain standard GCP error details. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the operation was created. - update_time (~.timestamp.Timestamp): + update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the operation was updated for the last time. """ @@ -179,7 +179,7 @@ class ExportDataOperationMetadata(proto.Message): r"""Details of ExportData operation. Attributes: - output_info (~.operations.ExportDataOperationMetadata.ExportDataOutputInfo): + output_info (google.cloud.automl_v1.types.ExportDataOperationMetadata.ExportDataOutputInfo): Output only. Information further describing this export data's output. """ @@ -206,10 +206,10 @@ class BatchPredictOperationMetadata(proto.Message): r"""Details of BatchPredict operation. Attributes: - input_config (~.io.BatchPredictInputConfig): + input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): Output only. The input config that was given upon starting this batch predict operation. - output_info (~.operations.BatchPredictOperationMetadata.BatchPredictOutputInfo): + output_info (google.cloud.automl_v1.types.BatchPredictOperationMetadata.BatchPredictOutputInfo): Output only. Information further describing this batch predict's output. """ @@ -241,7 +241,7 @@ class ExportModelOperationMetadata(proto.Message): r"""Details of ExportModel operation. Attributes: - output_info (~.operations.ExportModelOperationMetadata.ExportModelOutputInfo): + output_info (google.cloud.automl_v1.types.ExportModelOperationMetadata.ExportModelOutputInfo): Output only. Information further describing the output of this model export. 
""" diff --git a/google/cloud/automl_v1/types/prediction_service.py b/google/cloud/automl_v1/types/prediction_service.py index 3b85f497..9805f522 100644 --- a/google/cloud/automl_v1/types/prediction_service.py +++ b/google/cloud/automl_v1/types/prediction_service.py @@ -42,11 +42,11 @@ class PredictRequest(proto.Message): name (str): Required. Name of the model requested to serve the prediction. - payload (~.data_items.ExamplePayload): + payload (google.cloud.automl_v1.types.ExamplePayload): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. - params (Sequence[~.prediction_service.PredictRequest.ParamsEntry]): + params (Sequence[google.cloud.automl_v1.types.PredictRequest.ParamsEntry]): Additional domain-specific parameters, any string must be up to 25000 characters long. @@ -90,11 +90,11 @@ class PredictResponse(proto.Message): [PredictionService.Predict][google.cloud.automl.v1.PredictionService.Predict]. Attributes: - payload (Sequence[~.annotation_payload.AnnotationPayload]): + payload (Sequence[google.cloud.automl_v1.types.AnnotationPayload]): Prediction result. AutoML Translation and AutoML Natural Language Sentiment Analysis return precisely one payload. - preprocessed_input (~.data_items.ExamplePayload): + preprocessed_input (google.cloud.automl_v1.types.ExamplePayload): The preprocessed example that AutoML actually makes prediction on. Empty if AutoML does not preprocess the input example. @@ -104,7 +104,7 @@ class PredictResponse(proto.Message): document, the recognized text is returned in the [document_text][google.cloud.automl.v1.Document.document_text] property. - metadata (Sequence[~.prediction_service.PredictResponse.MetadataEntry]): + metadata (Sequence[google.cloud.automl_v1.types.PredictResponse.MetadataEntry]): Additional domain-specific prediction response metadata. AutoML Vision Object Detection @@ -144,13 +144,13 @@ class BatchPredictRequest(proto.Message): name (str): Required. Name of the model requested to serve the batch prediction. - input_config (~.io.BatchPredictInputConfig): + input_config (google.cloud.automl_v1.types.BatchPredictInputConfig): Required. The input configuration for batch prediction. - output_config (~.io.BatchPredictOutputConfig): + output_config (google.cloud.automl_v1.types.BatchPredictOutputConfig): Required. The Configuration specifying where output predictions should be written. - params (Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]): + params (Sequence[google.cloud.automl_v1.types.BatchPredictRequest.ParamsEntry]): Additional domain-specific parameters for the predictions, any string must be up to 25000 characters long. @@ -255,7 +255,7 @@ class BatchPredictResult(proto.Message): [PredictionService.BatchPredict][google.cloud.automl.v1.PredictionService.BatchPredict]. Attributes: - metadata (Sequence[~.prediction_service.BatchPredictResult.MetadataEntry]): + metadata (Sequence[google.cloud.automl_v1.types.BatchPredictResult.MetadataEntry]): Additional domain-specific prediction response metadata. AutoML Vision Object Detection diff --git a/google/cloud/automl_v1/types/service.py b/google/cloud/automl_v1/types/service.py index 57211809..bb84d854 100644 --- a/google/cloud/automl_v1/types/service.py +++ b/google/cloud/automl_v1/types/service.py @@ -62,7 +62,7 @@ class CreateDatasetRequest(proto.Message): parent (str): Required. The resource name of the project to create the dataset for. 
- dataset (~.gca_dataset.Dataset): + dataset (google.cloud.automl_v1.types.Dataset): Required. The dataset to create. """ @@ -128,7 +128,7 @@ class ListDatasetsResponse(proto.Message): [AutoMl.ListDatasets][google.cloud.automl.v1.AutoMl.ListDatasets]. Attributes: - datasets (Sequence[~.gca_dataset.Dataset]): + datasets (Sequence[google.cloud.automl_v1.types.Dataset]): The datasets read. next_page_token (str): A token to retrieve next page of results. Pass to @@ -152,10 +152,10 @@ class UpdateDatasetRequest(proto.Message): [AutoMl.UpdateDataset][google.cloud.automl.v1.AutoMl.UpdateDataset] Attributes: - dataset (~.gca_dataset.Dataset): + dataset (google.cloud.automl_v1.types.Dataset): Required. The dataset which replaces the resource on the server. - update_mask (~.field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. """ @@ -187,7 +187,7 @@ class ImportDataRequest(proto.Message): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. - input_config (~.io.InputConfig): + input_config (google.cloud.automl_v1.types.InputConfig): Required. The desired input location and its domain specific semantics, if any. """ @@ -204,7 +204,7 @@ class ExportDataRequest(proto.Message): Attributes: name (str): Required. The resource name of the dataset. - output_config (~.io.OutputConfig): + output_config (google.cloud.automl_v1.types.OutputConfig): Required. The desired output location. """ @@ -234,7 +234,7 @@ class CreateModelRequest(proto.Message): parent (str): Required. Resource name of the parent project where the model is being created. - model (~.gca_model.Model): + model (google.cloud.automl_v1.types.Model): Required. The model to create. """ @@ -302,7 +302,7 @@ class ListModelsResponse(proto.Message): [AutoMl.ListModels][google.cloud.automl.v1.AutoMl.ListModels]. Attributes: - model (Sequence[~.gca_model.Model]): + model (Sequence[google.cloud.automl_v1.types.Model]): List of models in the requested page. next_page_token (str): A token to retrieve next page of results. Pass to @@ -337,10 +337,10 @@ class UpdateModelRequest(proto.Message): [AutoMl.UpdateModel][google.cloud.automl.v1.AutoMl.UpdateModel] Attributes: - model (~.gca_model.Model): + model (google.cloud.automl_v1.types.Model): Required. The model which replaces the resource on the server. - update_mask (~.field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The update mask applies to the resource. """ @@ -355,10 +355,10 @@ class DeployModelRequest(proto.Message): [AutoMl.DeployModel][google.cloud.automl.v1.AutoMl.DeployModel]. Attributes: - image_object_detection_model_deployment_metadata (~.image.ImageObjectDetectionModelDeploymentMetadata): + image_object_detection_model_deployment_metadata (google.cloud.automl_v1.types.ImageObjectDetectionModelDeploymentMetadata): Model deployment metadata specific to Image Object Detection. - image_classification_model_deployment_metadata (~.image.ImageClassificationModelDeploymentMetadata): + image_classification_model_deployment_metadata (google.cloud.automl_v1.types.ImageClassificationModelDeploymentMetadata): Model deployment metadata specific to Image Classification. name (str): @@ -406,7 +406,7 @@ class ExportModelRequest(proto.Message): name (str): Required. The resource name of the model to export. - output_config (~.io.ModelExportOutputConfig): + output_config (google.cloud.automl_v1.types.ModelExportOutputConfig): Required. 
The desired output location and configuration. """ @@ -479,7 +479,7 @@ class ListModelEvaluationsResponse(proto.Message): [AutoMl.ListModelEvaluations][google.cloud.automl.v1.AutoMl.ListModelEvaluations]. Attributes: - model_evaluation (Sequence[~.gca_model_evaluation.ModelEvaluation]): + model_evaluation (Sequence[google.cloud.automl_v1.types.ModelEvaluation]): List of model evaluations in the requested page. next_page_token (str): diff --git a/google/cloud/automl_v1/types/text.py b/google/cloud/automl_v1/types/text.py index 72039612..360a4b02 100644 --- a/google/cloud/automl_v1/types/text.py +++ b/google/cloud/automl_v1/types/text.py @@ -38,7 +38,7 @@ class TextClassificationDatasetMetadata(proto.Message): r"""Dataset metadata for classification. Attributes: - classification_type (~.classification.ClassificationType): + classification_type (google.cloud.automl_v1.types.ClassificationType): Required. Type of the classification problem. """ @@ -51,7 +51,7 @@ class TextClassificationModelMetadata(proto.Message): r"""Model metadata that is specific to text classification. Attributes: - classification_type (~.classification.ClassificationType): + classification_type (google.cloud.automl_v1.types.ClassificationType): Output only. Classification type of the dataset used to train this model. """ diff --git a/google/cloud/automl_v1/types/text_extraction.py b/google/cloud/automl_v1/types/text_extraction.py index 19f1eb30..834b14ac 100644 --- a/google/cloud/automl_v1/types/text_extraction.py +++ b/google/cloud/automl_v1/types/text_extraction.py @@ -31,7 +31,7 @@ class TextExtractionAnnotation(proto.Message): r"""Annotation for identifying spans of text. Attributes: - text_segment (~.gca_text_segment.TextSegment): + text_segment (google.cloud.automl_v1.types.TextSegment): An entity annotation will set this, which is the part of the original text to which the annotation pertains. @@ -58,7 +58,7 @@ class TextExtractionEvaluationMetrics(proto.Message): au_prc (float): Output only. The Area under precision recall curve metric. - confidence_metrics_entries (Sequence[~.text_extraction.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): + confidence_metrics_entries (Sequence[google.cloud.automl_v1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): Output only. Metrics that have confidence thresholds. Precision-recall curve can be derived from it. diff --git a/google/cloud/automl_v1/types/text_sentiment.py b/google/cloud/automl_v1/types/text_sentiment.py index 576416e2..52dcef02 100644 --- a/google/cloud/automl_v1/types/text_sentiment.py +++ b/google/cloud/automl_v1/types/text_sentiment.py @@ -80,7 +80,7 @@ class TextSentimentEvaluationMetrics(proto.Message): Output only. Quadratic weighted kappa. Only set for the overall model evaluation, not for evaluation of a single annotation spec. - confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix): + confusion_matrix (google.cloud.automl_v1.types.ClassificationEvaluationMetrics.ConfusionMatrix): Output only. Confusion matrix of the evaluation. Only set for the overall model evaluation, not for evaluation of a single diff --git a/google/cloud/automl_v1/types/translation.py b/google/cloud/automl_v1/types/translation.py index acea0220..f7b7d6ef 100644 --- a/google/cloud/automl_v1/types/translation.py +++ b/google/cloud/automl_v1/types/translation.py @@ -93,7 +93,7 @@ class TranslationAnnotation(proto.Message): r"""Annotation details specific to translation. 
Attributes: - translated_content (~.data_items.TextSnippet): + translated_content (google.cloud.automl_v1.types.TextSnippet): Output only . The translated content. """ diff --git a/google/cloud/automl_v1beta1/__init__.py b/google/cloud/automl_v1beta1/__init__.py index 35f83585..904a45aa 100644 --- a/google/cloud/automl_v1beta1/__init__.py +++ b/google/cloud/automl_v1beta1/__init__.py @@ -149,7 +149,6 @@ "AnnotationPayload", "AnnotationSpec", "ArrayStats", - "AutoMlClient", "BatchPredictInputConfig", "BatchPredictOperationMetadata", "BatchPredictOutputConfig", @@ -228,6 +227,7 @@ "OutputConfig", "PredictRequest", "PredictResponse", + "PredictionServiceClient", "RegressionEvaluationMetrics", "Row", "StringStats", @@ -269,5 +269,5 @@ "VideoObjectTrackingDatasetMetadata", "VideoObjectTrackingEvaluationMetrics", "VideoObjectTrackingModelMetadata", - "PredictionServiceClient", + "AutoMlClient", ) diff --git a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py index cd2c4388..f610615c 100644 --- a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py +++ b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py @@ -119,7 +119,36 @@ class AutoMlAsyncClient: common_location_path = staticmethod(AutoMlClient.common_location_path) parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path) - from_service_account_file = AutoMlClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_info.__func__(AutoMlAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlAsyncClient: The constructed client. + """ + return AutoMlClient.from_service_account_file.__func__(AutoMlAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -196,16 +225,17 @@ async def create_dataset( r"""Creates a dataset. Args: - request (:class:`~.service.CreateDatasetRequest`): + request (:class:`google.cloud.automl_v1beta1.types.CreateDatasetRequest`): The request object. Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. parent (:class:`str`): Required. The resource name of the project to create the dataset for. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - dataset (:class:`~.gca_dataset.Dataset`): + dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`): Required. The dataset to create. This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this @@ -218,7 +248,7 @@ async def create_dataset( sent along with the request as metadata. 
Returns: - ~.gca_dataset.Dataset: + google.cloud.automl_v1beta1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -277,12 +307,13 @@ async def get_dataset( r"""Gets a dataset. Args: - request (:class:`~.service.GetDatasetRequest`): + request (:class:`google.cloud.automl_v1beta1.types.GetDatasetRequest`): The request object. Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. name (:class:`str`): Required. The resource name of the dataset to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -294,7 +325,7 @@ async def get_dataset( sent along with the request as metadata. Returns: - ~.dataset.Dataset: + google.cloud.automl_v1beta1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -330,6 +361,7 @@ async def get_dataset( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -359,12 +391,13 @@ async def list_datasets( r"""Lists datasets in a project. Args: - request (:class:`~.service.ListDatasetsRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ListDatasetsRequest`): The request object. Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. parent (:class:`str`): Required. The resource name of the project from which to list datasets. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -376,7 +409,7 @@ async def list_datasets( sent along with the request as metadata. Returns: - ~.pagers.ListDatasetsAsyncPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsAsyncPager: Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. @@ -413,6 +446,7 @@ async def list_datasets( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -448,12 +482,13 @@ async def update_dataset( r"""Updates a dataset. Args: - request (:class:`~.service.UpdateDatasetRequest`): + request (:class:`google.cloud.automl_v1beta1.types.UpdateDatasetRequest`): The request object. Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - dataset (:class:`~.gca_dataset.Dataset`): + dataset (:class:`google.cloud.automl_v1beta1.types.Dataset`): Required. The dataset which replaces the resource on the server. + This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -465,7 +500,7 @@ async def update_dataset( sent along with the request as metadata. Returns: - ~.gca_dataset.Dataset: + google.cloud.automl_v1beta1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -528,12 +563,13 @@ async def delete_dataset( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteDatasetRequest`): + request (:class:`google.cloud.automl_v1beta1.types.DeleteDatasetRequest`): The request object. Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. name (:class:`str`): Required. The resource name of the dataset to delete. 
+ This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -545,24 +581,22 @@ async def delete_dataset( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -594,6 +628,7 @@ async def delete_dataset( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -641,20 +676,22 @@ async def import_data( field when it completes. Args: - request (:class:`~.service.ImportDataRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ImportDataRequest`): The request object. Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. name (:class:`str`): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.InputConfig`): + input_config (:class:`google.cloud.automl_v1beta1.types.InputConfig`): Required. The desired input location and its domain specific semantics, if any. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -666,24 +703,22 @@ async def import_data( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. 
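To make the long-running semantics described here concrete, a minimal sketch of the async `import_data` call is shown below; the dataset name and GCS URI are placeholders, credentials are assumed to come from Application Default Credentials, and the operation's result is the `Empty` message described in the Returns section:

import asyncio

from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.services.auto_ml.async_client import AutoMlAsyncClient


async def import_examples():
    client = AutoMlAsyncClient()
    operation = await client.import_data(
        # Placeholder resource name and input URI.
        name="projects/my-project/locations/us-central1/datasets/TBL0000000000000000000",
        input_config=automl_v1beta1.InputConfig(
            gcs_source=automl_v1beta1.GcsSource(
                input_uris=["gs://my-bucket/import.csv"]
            )
        ),
    )
    # Waits (asynchronously) until the long-running operation finishes.
    await operation.result()


asyncio.run(import_examples())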
""" # Create or coerce a protobuf request object. @@ -750,18 +785,20 @@ async def export_data( completes. Args: - request (:class:`~.service.ExportDataRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ExportDataRequest`): The request object. Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. name (:class:`str`): Required. The resource name of the dataset. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.OutputConfig`): + output_config (:class:`google.cloud.automl_v1beta1.types.OutputConfig`): Required. The desired output location. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -773,24 +810,22 @@ async def export_data( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -853,12 +888,13 @@ async def get_annotation_spec( r"""Gets an annotation spec. Args: - request (:class:`~.service.GetAnnotationSpecRequest`): + request (:class:`google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest`): The request object. Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. name (:class:`str`): Required. The resource name of the annotation spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -870,7 +906,7 @@ async def get_annotation_spec( sent along with the request as metadata. Returns: - ~.annotation_spec.AnnotationSpec: + google.cloud.automl_v1beta1.types.AnnotationSpec: A definition of an annotation spec. """ # Create or coerce a protobuf request object. @@ -902,6 +938,7 @@ async def get_annotation_spec( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -931,12 +968,13 @@ async def get_table_spec( r"""Gets a table spec. Args: - request (:class:`~.service.GetTableSpecRequest`): + request (:class:`google.cloud.automl_v1beta1.types.GetTableSpecRequest`): The request object. Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. name (:class:`str`): Required. The resource name of the table spec to retrieve. 
+ This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -948,18 +986,17 @@ async def get_table_spec( sent along with the request as metadata. Returns: - ~.table_spec.TableSpec: - A specification of a relational table. The table's - schema is represented via its child column specs. It is - pre-populated as part of ImportData by schema inference - algorithm, the version of which is a required parameter - of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the - data in the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a model. - Used by: - - - Tables + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -991,6 +1028,7 @@ async def get_table_spec( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1020,12 +1058,13 @@ async def list_table_specs( r"""Lists table specs in a dataset. Args: - request (:class:`~.service.ListTableSpecsRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ListTableSpecsRequest`): The request object. Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. parent (:class:`str`): Required. The resource name of the dataset to list table specs from. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1037,7 +1076,7 @@ async def list_table_specs( sent along with the request as metadata. Returns: - ~.pagers.ListTableSpecsAsyncPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsAsyncPager: Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. @@ -1074,6 +1113,7 @@ async def list_table_specs( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1109,12 +1149,13 @@ async def update_table_spec( r"""Updates a table spec. Args: - request (:class:`~.service.UpdateTableSpecRequest`): + request (:class:`google.cloud.automl_v1beta1.types.UpdateTableSpecRequest`): The request object. Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - table_spec (:class:`~.gca_table_spec.TableSpec`): + table_spec (:class:`google.cloud.automl_v1beta1.types.TableSpec`): Required. The table spec which replaces the resource on the server. + This corresponds to the ``table_spec`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1126,18 +1167,17 @@ async def update_table_spec( sent along with the request as metadata. Returns: - ~.gca_table_spec.TableSpec: - A specification of a relational table. The table's - schema is represented via its child column specs. 
It is - pre-populated as part of ImportData by schema inference - algorithm, the version of which is a required parameter - of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the - data in the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a model. - Used by: - - - Tables + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1192,12 +1232,13 @@ async def get_column_spec( r"""Gets a column spec. Args: - request (:class:`~.service.GetColumnSpecRequest`): + request (:class:`google.cloud.automl_v1beta1.types.GetColumnSpecRequest`): The request object. Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. name (:class:`str`): Required. The resource name of the column spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1209,12 +1250,9 @@ async def get_column_spec( sent along with the request as metadata. Returns: - ~.column_spec.ColumnSpec: - A representation of a column in a relational table. When - listing them, column specs are returned in the same - order in which they were given on import . Used by: - - - Tables + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1246,6 +1284,7 @@ async def get_column_spec( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1275,12 +1314,13 @@ async def list_column_specs( r"""Lists column specs in a table spec. Args: - request (:class:`~.service.ListColumnSpecsRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ListColumnSpecsRequest`): The request object. Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. parent (:class:`str`): Required. The resource name of the table spec to list column specs from. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1292,7 +1332,7 @@ async def list_column_specs( sent along with the request as metadata. Returns: - ~.pagers.ListColumnSpecsAsyncPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsAsyncPager: Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. @@ -1329,6 +1369,7 @@ async def list_column_specs( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1364,12 +1405,13 @@ async def update_column_spec( r"""Updates a column spec. 
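The `deadline=5.0` additions throughout these hunks put an overall bound on each method's default retry. Callers can still pass their own policy per call; a sketch using the same predicate with a wider deadline (the values here are illustrative, not the library defaults, and the resource name is a placeholder):

from google.api_core import exceptions
from google.api_core import retry as retries
from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# Retry DeadlineExceeded / ServiceUnavailable, as the defaults do, but allow
# up to 30 seconds overall instead of 5.
custom_retry = retries.Retry(
    predicate=retries.if_exception_type(
        exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
    ),
    deadline=30.0,
)

column_spec = client.get_column_spec(
    name="projects/my-project/locations/us-central1/datasets/TBL1/tableSpecs/2/columnSpecs/3",
    retry=custom_retry,
    timeout=30.0,
)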
Args: - request (:class:`~.service.UpdateColumnSpecRequest`): + request (:class:`google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest`): The request object. Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - column_spec (:class:`~.gca_column_spec.ColumnSpec`): + column_spec (:class:`google.cloud.automl_v1beta1.types.ColumnSpec`): Required. The column spec which replaces the resource on the server. + This corresponds to the ``column_spec`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1381,12 +1423,9 @@ async def update_column_spec( sent along with the request as metadata. Returns: - ~.gca_column_spec.ColumnSpec: - A representation of a column in a relational table. When - listing them, column specs are returned in the same - order in which they were given on import . Used by: - - - Tables + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1446,17 +1485,18 @@ async def create_model( each annotation spec. Args: - request (:class:`~.service.CreateModelRequest`): + request (:class:`google.cloud.automl_v1beta1.types.CreateModelRequest`): The request object. Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. parent (:class:`str`): Required. Resource name of the parent project where the model is being created. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - model (:class:`~.gca_model.Model`): + model (:class:`google.cloud.automl_v1beta1.types.Model`): Required. The model to create. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -1469,12 +1509,12 @@ async def create_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_model.Model``: API proto representing a - trained machine learning model. + :class:`google.cloud.automl_v1beta1.types.Model` API + proto representing a trained machine learning model. """ # Create or coerce a protobuf request object. @@ -1537,7 +1577,7 @@ async def get_model( r"""Gets a model. Args: - request (:class:`~.service.GetModelRequest`): + request (:class:`google.cloud.automl_v1beta1.types.GetModelRequest`): The request object. Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. name (:class:`str`): @@ -1553,7 +1593,7 @@ async def get_model( sent along with the request as metadata. Returns: - ~.model.Model: + google.cloud.automl_v1beta1.types.Model: API proto representing a trained machine learning model. @@ -1587,6 +1627,7 @@ async def get_model( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1616,12 +1657,13 @@ async def list_models( r"""Lists models. Args: - request (:class:`~.service.ListModelsRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ListModelsRequest`): The request object. Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. parent (:class:`str`): Required. 
Resource name of the project, from which to list the models. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1633,7 +1675,7 @@ async def list_models( sent along with the request as metadata. Returns: - ~.pagers.ListModelsAsyncPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsAsyncPager: Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. @@ -1670,6 +1712,7 @@ async def list_models( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1708,12 +1751,13 @@ async def delete_model( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteModelRequest`): + request (:class:`google.cloud.automl_v1beta1.types.DeleteModelRequest`): The request object. Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. name (:class:`str`): Required. Resource name of the model being deleted. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1725,24 +1769,22 @@ async def delete_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1774,6 +1816,7 @@ async def delete_model( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -1825,12 +1868,13 @@ async def deploy_model( completes. Args: - request (:class:`~.service.DeployModelRequest`): + request (:class:`google.cloud.automl_v1beta1.types.DeployModelRequest`): The request object. Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. name (:class:`str`): Required. Resource name of the model to deploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1842,24 +1886,22 @@ async def deploy_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. 
A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1928,12 +1970,13 @@ async def undeploy_model( completes. Args: - request (:class:`~.service.UndeployModelRequest`): + request (:class:`google.cloud.automl_v1beta1.types.UndeployModelRequest`): The request object. Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. name (:class:`str`): Required. Resource name of the model to undeploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1945,24 +1988,22 @@ async def undeploy_model( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2032,7 +2073,7 @@ async def export_model( completes. Args: - request (:class:`~.service.ExportModelRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ExportModelRequest`): The request object. Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an @@ -2040,12 +2081,14 @@ async def export_model( name (:class:`str`): Required. The resource name of the model to export. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.ModelExportOutputConfig`): + output_config (:class:`google.cloud.automl_v1beta1.types.ModelExportOutputConfig`): Required. The desired output location and configuration. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2057,24 +2100,22 @@ async def export_model( sent along with the request as metadata. 
Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2152,19 +2193,21 @@ async def export_evaluated_examples( completes. Args: - request (:class:`~.service.ExportEvaluatedExamplesRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest`): The request object. Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. name (:class:`str`): Required. The resource name of the model whose evaluated examples are to be exported. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.ExportEvaluatedExamplesOutputConfig`): + output_config (:class:`google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig`): Required. The desired output location and configuration. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2176,24 +2219,22 @@ async def export_evaluated_examples( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2256,12 +2297,13 @@ async def get_model_evaluation( r"""Gets a model evaluation. Args: - request (:class:`~.service.GetModelEvaluationRequest`): + request (:class:`google.cloud.automl_v1beta1.types.GetModelEvaluationRequest`): The request object. 
Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. name (:class:`str`): Required. Resource name for the model evaluation. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2273,7 +2315,7 @@ async def get_model_evaluation( sent along with the request as metadata. Returns: - ~.model_evaluation.ModelEvaluation: + google.cloud.automl_v1beta1.types.ModelEvaluation: Evaluation results of a model. """ # Create or coerce a protobuf request object. @@ -2305,6 +2347,7 @@ async def get_model_evaluation( predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=DEFAULT_CLIENT_INFO, @@ -2334,7 +2377,7 @@ async def list_model_evaluations( r"""Lists model evaluations. Args: - request (:class:`~.service.ListModelEvaluationsRequest`): + request (:class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest`): The request object. Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. parent (:class:`str`): @@ -2343,6 +2386,7 @@ async def list_model_evaluations( modelId is set as "-", this will list model evaluations from across all models of the parent location. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2354,7 +2398,7 @@ async def list_model_evaluations( sent along with the request as metadata. Returns: - ~.pagers.ListModelEvaluationsAsyncPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsAsyncPager: Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. diff --git a/google/cloud/automl_v1beta1/services/auto_ml/client.py b/google/cloud/automl_v1beta1/services/auto_ml/client.py index a930910e..16b0e7cf 100644 --- a/google/cloud/automl_v1beta1/services/auto_ml/client.py +++ b/google/cloud/automl_v1beta1/services/auto_ml/client.py @@ -151,6 +151,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoMlClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -163,7 +179,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + AutoMlClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -369,10 +385,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.AutoMlTransport]): The + transport (Union[str, AutoMlTransport]): The transport to use. 
If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -408,21 +424,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -465,7 +477,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -483,16 +495,17 @@ def create_dataset( r"""Creates a dataset. Args: - request (:class:`~.service.CreateDatasetRequest`): + request (google.cloud.automl_v1beta1.types.CreateDatasetRequest): The request object. Request message for [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset]. - parent (:class:`str`): + parent (str): Required. The resource name of the project to create the dataset for. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - dataset (:class:`~.gca_dataset.Dataset`): + dataset (google.cloud.automl_v1beta1.types.Dataset): Required. The dataset to create. This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this @@ -505,7 +518,7 @@ def create_dataset( sent along with the request as metadata. Returns: - ~.gca_dataset.Dataset: + google.cloud.automl_v1beta1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -565,12 +578,13 @@ def get_dataset( r"""Gets a dataset. Args: - request (:class:`~.service.GetDatasetRequest`): + request (google.cloud.automl_v1beta1.types.GetDatasetRequest): The request object. Request message for [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset]. - name (:class:`str`): + name (str): Required. The resource name of the dataset to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -582,7 +596,7 @@ def get_dataset( sent along with the request as metadata. Returns: - ~.dataset.Dataset: + google.cloud.automl_v1beta1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -640,12 +654,13 @@ def list_datasets( r"""Lists datasets in a project. 
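Earlier in this file's diff, the transport wiring switches from prebuilt SSL credentials to a client certificate callback (`client_cert_source_for_mtls`). From the caller's side nothing changes: mTLS is still opted into via the `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable and, optionally, an explicit cert source on `ClientOptions`. A sketch with a hypothetical loader (file paths are placeholders):

import os

from google.api_core.client_options import ClientOptions
from google.cloud import automl_v1beta1

# The client only consults the cert source when this env var is "true".
os.environ["GOOGLE_API_USE_CLIENT_CERTIFICATE"] = "true"


def load_client_cert():
    # Hypothetical loader; must return (cert_bytes, key_bytes) in PEM form.
    with open("client.crt", "rb") as cert, open("client.key", "rb") as key:
        return cert.read(), key.read()


client = automl_v1beta1.AutoMlClient(
    client_options=ClientOptions(client_cert_source=load_client_cert)
)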
Args: - request (:class:`~.service.ListDatasetsRequest`): + request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): The request object. Request message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. - parent (:class:`str`): + parent (str): Required. The resource name of the project from which to list datasets. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -657,7 +672,7 @@ def list_datasets( sent along with the request as metadata. Returns: - ~.pagers.ListDatasetsPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListDatasetsPager: Response message for [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. @@ -722,12 +737,13 @@ def update_dataset( r"""Updates a dataset. Args: - request (:class:`~.service.UpdateDatasetRequest`): + request (google.cloud.automl_v1beta1.types.UpdateDatasetRequest): The request object. Request message for [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] - dataset (:class:`~.gca_dataset.Dataset`): + dataset (google.cloud.automl_v1beta1.types.Dataset): Required. The dataset which replaces the resource on the server. + This corresponds to the ``dataset`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -739,7 +755,7 @@ def update_dataset( sent along with the request as metadata. Returns: - ~.gca_dataset.Dataset: + google.cloud.automl_v1beta1.types.Dataset: A workspace for solving a single, particular machine learning (ML) problem. A workspace contains examples @@ -803,12 +819,13 @@ def delete_dataset( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteDatasetRequest`): + request (google.cloud.automl_v1beta1.types.DeleteDatasetRequest): The request object. Request message for [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset]. - name (:class:`str`): + name (str): Required. The resource name of the dataset to delete. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -820,24 +837,22 @@ def delete_dataset( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -909,20 +924,22 @@ def import_data( field when it completes. Args: - request (:class:`~.service.ImportDataRequest`): + request (google.cloud.automl_v1beta1.types.ImportDataRequest): The request object. 
Request message for [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData]. - name (:class:`str`): + name (str): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.InputConfig`): + input_config (google.cloud.automl_v1beta1.types.InputConfig): Required. The desired input location and its domain specific semantics, if any. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -934,24 +951,22 @@ def import_data( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1019,18 +1034,20 @@ def export_data( completes. Args: - request (:class:`~.service.ExportDataRequest`): + request (google.cloud.automl_v1beta1.types.ExportDataRequest): The request object. Request message for [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData]. - name (:class:`str`): + name (str): Required. The resource name of the dataset. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.OutputConfig`): + output_config (google.cloud.automl_v1beta1.types.OutputConfig): Required. The desired output location. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1042,24 +1059,22 @@ def export_data( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. 
For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -1123,12 +1138,13 @@ def get_annotation_spec( r"""Gets an annotation spec. Args: - request (:class:`~.service.GetAnnotationSpecRequest`): + request (google.cloud.automl_v1beta1.types.GetAnnotationSpecRequest): The request object. Request message for [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec]. - name (:class:`str`): + name (str): Required. The resource name of the annotation spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1140,7 +1156,7 @@ def get_annotation_spec( sent along with the request as metadata. Returns: - ~.annotation_spec.AnnotationSpec: + google.cloud.automl_v1beta1.types.AnnotationSpec: A definition of an annotation spec. """ # Create or coerce a protobuf request object. @@ -1194,12 +1210,13 @@ def get_table_spec( r"""Gets a table spec. Args: - request (:class:`~.service.GetTableSpecRequest`): + request (google.cloud.automl_v1beta1.types.GetTableSpecRequest): The request object. Request message for [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec]. - name (:class:`str`): + name (str): Required. The resource name of the table spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1211,18 +1228,17 @@ def get_table_spec( sent along with the request as metadata. Returns: - ~.table_spec.TableSpec: - A specification of a relational table. The table's - schema is represented via its child column specs. It is - pre-populated as part of ImportData by schema inference - algorithm, the version of which is a required parameter - of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the - data in the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a model. - Used by: - - - Tables + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1276,12 +1292,13 @@ def list_table_specs( r"""Lists table specs in a dataset. Args: - request (:class:`~.service.ListTableSpecsRequest`): + request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): The request object. Request message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. - parent (:class:`str`): + parent (str): Required. The resource name of the dataset to list table specs from. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
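For the dataset export flow documented in these hunks, a minimal sync sketch follows; names and the GCS prefix are placeholders, and the bucket is assumed to exist. As with the other long-running calls, the operation's result is `Empty`:

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

operation = client.export_data(
    name="projects/my-project/locations/us-central1/datasets/TBL0000000000000000000",
    output_config=automl_v1beta1.OutputConfig(
        gcs_destination=automl_v1beta1.GcsDestination(
            output_uri_prefix="gs://my-bucket/export/"
        )
    ),
)
operation.result()  # Blocks until the export finishes.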
@@ -1293,7 +1310,7 @@ def list_table_specs( sent along with the request as metadata. Returns: - ~.pagers.ListTableSpecsPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListTableSpecsPager: Response message for [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. @@ -1358,12 +1375,13 @@ def update_table_spec( r"""Updates a table spec. Args: - request (:class:`~.service.UpdateTableSpecRequest`): + request (google.cloud.automl_v1beta1.types.UpdateTableSpecRequest): The request object. Request message for [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] - table_spec (:class:`~.gca_table_spec.TableSpec`): + table_spec (google.cloud.automl_v1beta1.types.TableSpec): Required. The table spec which replaces the resource on the server. + This corresponds to the ``table_spec`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1375,18 +1393,17 @@ def update_table_spec( sent along with the request as metadata. Returns: - ~.gca_table_spec.TableSpec: - A specification of a relational table. The table's - schema is represented via its child column specs. It is - pre-populated as part of ImportData by schema inference - algorithm, the version of which is a required parameter - of ImportData InputConfig. Note: While working with a - table, at times the schema may be inconsistent with the - data in the table (e.g. string in a FLOAT64 column). The - consistency validation is done upon creation of a model. - Used by: - - - Tables + google.cloud.automl_v1beta1.types.TableSpec: + A specification of a relational table. + The table's schema is represented via its child + column specs. It is pre-populated as part of + ImportData by schema inference algorithm, the version + of which is a required parameter of ImportData + InputConfig. Note: While working with a table, at + times the schema may be inconsistent with the data in + the table (e.g. string in a FLOAT64 column). The + consistency validation is done upon creation of a + model. Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1442,12 +1459,13 @@ def get_column_spec( r"""Gets a column spec. Args: - request (:class:`~.service.GetColumnSpecRequest`): + request (google.cloud.automl_v1beta1.types.GetColumnSpecRequest): The request object. Request message for [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec]. - name (:class:`str`): + name (str): Required. The resource name of the column spec to retrieve. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1459,12 +1477,9 @@ def get_column_spec( sent along with the request as metadata. Returns: - ~.column_spec.ColumnSpec: - A representation of a column in a relational table. When - listing them, column specs are returned in the same - order in which they were given on import . Used by: - - - Tables + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1518,12 +1533,13 @@ def list_column_specs( r"""Lists column specs in a table spec. Args: - request (:class:`~.service.ListColumnSpecsRequest`): + request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): The request object. 
Request message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. - parent (:class:`str`): + parent (str): Required. The resource name of the table spec to list column specs from. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1535,7 +1551,7 @@ def list_column_specs( sent along with the request as metadata. Returns: - ~.pagers.ListColumnSpecsPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListColumnSpecsPager: Response message for [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. @@ -1600,12 +1616,13 @@ def update_column_spec( r"""Updates a column spec. Args: - request (:class:`~.service.UpdateColumnSpecRequest`): + request (google.cloud.automl_v1beta1.types.UpdateColumnSpecRequest): The request object. Request message for [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] - column_spec (:class:`~.gca_column_spec.ColumnSpec`): + column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): Required. The column spec which replaces the resource on the server. + This corresponds to the ``column_spec`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1617,12 +1634,9 @@ def update_column_spec( sent along with the request as metadata. Returns: - ~.gca_column_spec.ColumnSpec: - A representation of a column in a relational table. When - listing them, column specs are returned in the same - order in which they were given on import . Used by: - - - Tables + google.cloud.automl_v1beta1.types.ColumnSpec: + A representation of a column in a relational table. When listing them, column specs are returned in the same order in which they were + given on import . Used by: \* Tables """ # Create or coerce a protobuf request object. @@ -1683,17 +1697,18 @@ def create_model( each annotation spec. Args: - request (:class:`~.service.CreateModelRequest`): + request (google.cloud.automl_v1beta1.types.CreateModelRequest): The request object. Request message for [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel]. - parent (:class:`str`): + parent (str): Required. Resource name of the parent project where the model is being created. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - model (:class:`~.gca_model.Model`): + model (google.cloud.automl_v1beta1.types.Model): Required. The model to create. This corresponds to the ``model`` field on the ``request`` instance; if ``request`` is provided, this @@ -1706,12 +1721,12 @@ def create_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. The result type for the operation will be - :class:``~.gca_model.Model``: API proto representing a - trained machine learning model. + :class:`google.cloud.automl_v1beta1.types.Model` API + proto representing a trained machine learning model. """ # Create or coerce a protobuf request object. @@ -1775,10 +1790,10 @@ def get_model( r"""Gets a model. Args: - request (:class:`~.service.GetModelRequest`): + request (google.cloud.automl_v1beta1.types.GetModelRequest): The request object. Request message for [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel]. - name (:class:`str`): + name (str): Required. Resource name of the model. 
This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this @@ -1791,7 +1806,7 @@ def get_model( sent along with the request as metadata. Returns: - ~.model.Model: + google.cloud.automl_v1beta1.types.Model: API proto representing a trained machine learning model. @@ -1847,12 +1862,13 @@ def list_models( r"""Lists models. Args: - request (:class:`~.service.ListModelsRequest`): + request (google.cloud.automl_v1beta1.types.ListModelsRequest): The request object. Request message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. - parent (:class:`str`): + parent (str): Required. Resource name of the project, from which to list the models. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1864,7 +1880,7 @@ def list_models( sent along with the request as metadata. Returns: - ~.pagers.ListModelsPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelsPager: Response message for [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. @@ -1932,12 +1948,13 @@ def delete_model( [metadata][google.longrunning.Operation.metadata] field. Args: - request (:class:`~.service.DeleteModelRequest`): + request (google.cloud.automl_v1beta1.types.DeleteModelRequest): The request object. Request message for [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel]. - name (:class:`str`): + name (str): Required. Resource name of the model being deleted. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -1949,24 +1966,22 @@ def delete_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2042,12 +2057,13 @@ def deploy_model( completes. Args: - request (:class:`~.service.DeployModelRequest`): + request (google.cloud.automl_v1beta1.types.DeployModelRequest): The request object. Request message for [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. - name (:class:`str`): + name (str): Required. Resource name of the model to deploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2059,24 +2075,22 @@ def deploy_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. 
- The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2146,12 +2160,13 @@ def undeploy_model( completes. Args: - request (:class:`~.service.UndeployModelRequest`): + request (google.cloud.automl_v1beta1.types.UndeployModelRequest): The request object. Request message for [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel]. - name (:class:`str`): + name (str): Required. Resource name of the model to undeploy. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2163,24 +2178,22 @@ def undeploy_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2251,20 +2264,22 @@ def export_model( completes. Args: - request (:class:`~.service.ExportModelRequest`): + request (google.cloud.automl_v1beta1.types.ExportModelRequest): The request object. Request message for [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. Models need to be enabled for exporting, otherwise an error code will be returned. - name (:class:`str`): + name (str): Required. The resource name of the model to export. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.ModelExportOutputConfig`): + output_config (google.cloud.automl_v1beta1.types.ModelExportOutputConfig): Required. The desired output location and configuration. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
@@ -2276,24 +2291,22 @@ def export_model( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2372,19 +2385,21 @@ def export_evaluated_examples( completes. Args: - request (:class:`~.service.ExportEvaluatedExamplesRequest`): + request (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesRequest): The request object. Request message for [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples]. - name (:class:`str`): + name (str): Required. The resource name of the model whose evaluated examples are to be exported. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.ExportEvaluatedExamplesOutputConfig`): + output_config (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig): Required. The desired output location and configuration. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2396,24 +2411,22 @@ def export_evaluated_examples( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.empty.Empty``: A generic empty message that - you can re-use to avoid defining duplicated empty - messages in your APIs. A typical example is to use it as - the request or the response type of an API method. For - instance: + The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated + empty messages in your APIs. A typical example is to + use it as the request or the response type of an API + method. For instance: - :: + service Foo { + rpc Bar(google.protobuf.Empty) returns + (google.protobuf.Empty); - service Foo { - rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); - } + } - The JSON representation for ``Empty`` is empty JSON - object ``{}``. + The JSON representation for Empty is empty JSON + object {}. """ # Create or coerce a protobuf request object. @@ -2479,12 +2492,13 @@ def get_model_evaluation( r"""Gets a model evaluation. Args: - request (:class:`~.service.GetModelEvaluationRequest`): + request (google.cloud.automl_v1beta1.types.GetModelEvaluationRequest): The request object. 
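For reference, a minimal sketch of ``export_model`` with the flattened ``output_config`` argument (bucket, model name and model format are placeholders; supported formats depend on the model type):

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
output_config = automl_v1beta1.ModelExportOutputConfig(
    gcs_destination=automl_v1beta1.GcsDestination(
        output_uri_prefix="gs://my-bucket/model-export/"
    ),
    model_format="tf_saved_model",
)

operation = client.export_model(
    name="projects/my-project/locations/us-central1/models/IOD0000000000000000",
    output_config=output_config,
)
operation.result()  # the operation result is an empty message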
Request message for [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation]. - name (:class:`str`): + name (str): Required. Resource name for the model evaluation. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2496,7 +2510,7 @@ def get_model_evaluation( sent along with the request as metadata. Returns: - ~.model_evaluation.ModelEvaluation: + google.cloud.automl_v1beta1.types.ModelEvaluation: Evaluation results of a model. """ # Create or coerce a protobuf request object. @@ -2550,15 +2564,16 @@ def list_model_evaluations( r"""Lists model evaluations. Args: - request (:class:`~.service.ListModelEvaluationsRequest`): + request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): The request object. Request message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. - parent (:class:`str`): + parent (str): Required. Resource name of the model to list the model evaluations for. If modelId is set as "-", this will list model evaluations from across all models of the parent location. + This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -2570,7 +2585,7 @@ def list_model_evaluations( sent along with the request as metadata. Returns: - ~.pagers.ListModelEvaluationsPager: + google.cloud.automl_v1beta1.services.auto_ml.pagers.ListModelEvaluationsPager: Response message for [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. diff --git a/google/cloud/automl_v1beta1/services/auto_ml/pagers.py b/google/cloud/automl_v1beta1/services/auto_ml/pagers.py index 60528c89..10233877 100644 --- a/google/cloud/automl_v1beta1/services/auto_ml/pagers.py +++ b/google/cloud/automl_v1beta1/services/auto_ml/pagers.py @@ -15,7 +15,16 @@ # limitations under the License. # -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple +from typing import ( + Any, + AsyncIterable, + Awaitable, + Callable, + Iterable, + Sequence, + Tuple, + Optional, +) from google.cloud.automl_v1beta1.types import column_spec from google.cloud.automl_v1beta1.types import dataset @@ -29,7 +38,7 @@ class ListDatasetsPager: """A pager for iterating through ``list_datasets`` requests. This class thinly wraps an initial - :class:`~.service.ListDatasetsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` object, and provides an ``__iter__`` method to iterate through its ``datasets`` field. @@ -38,7 +47,7 @@ class ListDatasetsPager: through the ``datasets`` field on the corresponding responses. - All the usual :class:`~.service.ListDatasetsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -56,9 +65,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListDatasetsRequest`): + request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): The initial request object. - response (:class:`~.service.ListDatasetsResponse`): + response (google.cloud.automl_v1beta1.types.ListDatasetsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
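A minimal sketch of consuming the pager returned by ``list_model_evaluations`` (resource names are placeholders); the pager fetches further pages transparently as it is iterated:

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()

# "-" as the model id lists evaluations across all models in the location.
parent = "projects/my-project/locations/us-central1/models/-"
for evaluation in client.list_model_evaluations(parent=parent):
    print(evaluation.name)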
@@ -91,7 +100,7 @@ class ListDatasetsAsyncPager: """A pager for iterating through ``list_datasets`` requests. This class thinly wraps an initial - :class:`~.service.ListDatasetsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` object, and provides an ``__aiter__`` method to iterate through its ``datasets`` field. @@ -100,7 +109,7 @@ class ListDatasetsAsyncPager: through the ``datasets`` field on the corresponding responses. - All the usual :class:`~.service.ListDatasetsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListDatasetsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -118,9 +127,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListDatasetsRequest`): + request (google.cloud.automl_v1beta1.types.ListDatasetsRequest): The initial request object. - response (:class:`~.service.ListDatasetsResponse`): + response (google.cloud.automl_v1beta1.types.ListDatasetsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -157,7 +166,7 @@ class ListTableSpecsPager: """A pager for iterating through ``list_table_specs`` requests. This class thinly wraps an initial - :class:`~.service.ListTableSpecsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` object, and provides an ``__iter__`` method to iterate through its ``table_specs`` field. @@ -166,7 +175,7 @@ class ListTableSpecsPager: through the ``table_specs`` field on the corresponding responses. - All the usual :class:`~.service.ListTableSpecsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -184,9 +193,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListTableSpecsRequest`): + request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): The initial request object. - response (:class:`~.service.ListTableSpecsResponse`): + response (google.cloud.automl_v1beta1.types.ListTableSpecsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -219,7 +228,7 @@ class ListTableSpecsAsyncPager: """A pager for iterating through ``list_table_specs`` requests. This class thinly wraps an initial - :class:`~.service.ListTableSpecsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` object, and provides an ``__aiter__`` method to iterate through its ``table_specs`` field. @@ -228,7 +237,7 @@ class ListTableSpecsAsyncPager: through the ``table_specs`` field on the corresponding responses. - All the usual :class:`~.service.ListTableSpecsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListTableSpecsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -246,9 +255,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. 
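The async pagers work the same way but are consumed with ``async for``; a minimal sketch (placeholder parent, and assuming application default credentials are available):

import asyncio

from google.cloud.automl_v1beta1.services.auto_ml import AutoMlAsyncClient

async def list_datasets():
    client = AutoMlAsyncClient()
    parent = "projects/my-project/locations/us-central1"
    # The coroutine returns an async pager; iterate it with ``async for``.
    async for dataset in await client.list_datasets(parent=parent):
        print(dataset.display_name)

asyncio.run(list_datasets())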
- request (:class:`~.service.ListTableSpecsRequest`): + request (google.cloud.automl_v1beta1.types.ListTableSpecsRequest): The initial request object. - response (:class:`~.service.ListTableSpecsResponse`): + response (google.cloud.automl_v1beta1.types.ListTableSpecsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -285,7 +294,7 @@ class ListColumnSpecsPager: """A pager for iterating through ``list_column_specs`` requests. This class thinly wraps an initial - :class:`~.service.ListColumnSpecsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` object, and provides an ``__iter__`` method to iterate through its ``column_specs`` field. @@ -294,7 +303,7 @@ class ListColumnSpecsPager: through the ``column_specs`` field on the corresponding responses. - All the usual :class:`~.service.ListColumnSpecsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -312,9 +321,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListColumnSpecsRequest`): + request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): The initial request object. - response (:class:`~.service.ListColumnSpecsResponse`): + response (google.cloud.automl_v1beta1.types.ListColumnSpecsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -347,7 +356,7 @@ class ListColumnSpecsAsyncPager: """A pager for iterating through ``list_column_specs`` requests. This class thinly wraps an initial - :class:`~.service.ListColumnSpecsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` object, and provides an ``__aiter__`` method to iterate through its ``column_specs`` field. @@ -356,7 +365,7 @@ class ListColumnSpecsAsyncPager: through the ``column_specs`` field on the corresponding responses. - All the usual :class:`~.service.ListColumnSpecsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListColumnSpecsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -374,9 +383,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListColumnSpecsRequest`): + request (google.cloud.automl_v1beta1.types.ListColumnSpecsRequest): The initial request object. - response (:class:`~.service.ListColumnSpecsResponse`): + response (google.cloud.automl_v1beta1.types.ListColumnSpecsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -413,7 +422,7 @@ class ListModelsPager: """A pager for iterating through ``list_models`` requests. This class thinly wraps an initial - :class:`~.service.ListModelsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` object, and provides an ``__iter__`` method to iterate through its ``model`` field. @@ -422,7 +431,7 @@ class ListModelsPager: through the ``model`` field on the corresponding responses. 
- All the usual :class:`~.service.ListModelsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -440,9 +449,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelsRequest`): + request (google.cloud.automl_v1beta1.types.ListModelsRequest): The initial request object. - response (:class:`~.service.ListModelsResponse`): + response (google.cloud.automl_v1beta1.types.ListModelsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -475,7 +484,7 @@ class ListModelsAsyncPager: """A pager for iterating through ``list_models`` requests. This class thinly wraps an initial - :class:`~.service.ListModelsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` object, and provides an ``__aiter__`` method to iterate through its ``model`` field. @@ -484,7 +493,7 @@ class ListModelsAsyncPager: through the ``model`` field on the corresponding responses. - All the usual :class:`~.service.ListModelsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -502,9 +511,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelsRequest`): + request (google.cloud.automl_v1beta1.types.ListModelsRequest): The initial request object. - response (:class:`~.service.ListModelsResponse`): + response (google.cloud.automl_v1beta1.types.ListModelsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. @@ -541,7 +550,7 @@ class ListModelEvaluationsPager: """A pager for iterating through ``list_model_evaluations`` requests. This class thinly wraps an initial - :class:`~.service.ListModelEvaluationsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` object, and provides an ``__iter__`` method to iterate through its ``model_evaluation`` field. @@ -550,7 +559,7 @@ class ListModelEvaluationsPager: through the ``model_evaluation`` field on the corresponding responses. - All the usual :class:`~.service.ListModelEvaluationsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -568,9 +577,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelEvaluationsRequest`): + request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): The initial request object. - response (:class:`~.service.ListModelEvaluationsResponse`): + response (google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. 
@@ -603,7 +612,7 @@ class ListModelEvaluationsAsyncPager: """A pager for iterating through ``list_model_evaluations`` requests. This class thinly wraps an initial - :class:`~.service.ListModelEvaluationsResponse` object, and + :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` object, and provides an ``__aiter__`` method to iterate through its ``model_evaluation`` field. @@ -612,7 +621,7 @@ class ListModelEvaluationsAsyncPager: through the ``model_evaluation`` field on the corresponding responses. - All the usual :class:`~.service.ListModelEvaluationsResponse` + All the usual :class:`google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse` attributes are available on the pager. If multiple requests are made, only the most recent response is retained, and thus used for attribute lookup. """ @@ -630,9 +639,9 @@ def __init__( Args: method (Callable): The method that was originally called, and which instantiated this pager. - request (:class:`~.service.ListModelEvaluationsRequest`): + request (google.cloud.automl_v1beta1.types.ListModelEvaluationsRequest): The initial request object. - response (:class:`~.service.ListModelEvaluationsResponse`): + response (google.cloud.automl_v1beta1.types.ListModelEvaluationsResponse): The initial response object. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py index 63f0601f..bc7c22ea 100644 --- a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py +++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py @@ -78,10 +78,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -89,6 +89,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -98,20 +101,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. - self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { @@ -127,6 +127,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -140,6 +141,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -156,6 +158,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -175,6 +178,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -188,6 +192,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -201,6 +206,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -217,6 +223,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -230,6 +237,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -249,6 +257,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -262,6 +271,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -275,6 +285,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, @@ -302,6 +313,7 @@ def _prep_wrapped_messages(self, client_info): predicate=retries.if_exception_type( exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, ), + deadline=5.0, ), default_timeout=5.0, client_info=client_info, diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py index 6eb4537b..9a03b53c 100644 --- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py +++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py @@ -82,6 +82,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -112,6 +113,10 @@ def __init__( 
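The ``deadline=5.0`` additions above bound the total time spent retrying, separately from the per-attempt ``default_timeout``. A minimal sketch of the same retry policy built directly with ``google.api_core`` (the backoff values are illustrative, not taken from this change):

from google.api_core import exceptions
from google.api_core import retry as retries

retry_policy = retries.Retry(
    initial=0.1,       # first backoff, in seconds (illustrative)
    maximum=60.0,      # backoff cap (illustrative)
    multiplier=1.3,    # backoff growth factor (illustrative)
    predicate=retries.if_exception_type(
        exceptions.DeadlineExceeded,
        exceptions.ServiceUnavailable,
    ),
    deadline=5.0,      # stop retrying once 5 seconds have elapsed in total
)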
``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -126,72 +131,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. 
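A minimal sketch of the new ``client_cert_source_for_mtls`` hook on the gRPC transport, assuming the transport class is exported from the ``transports`` package (certificate paths and the mTLS host are hypothetical); the callback is only used when no explicit ``ssl_channel_credentials`` is supplied:

from google.cloud.automl_v1beta1.services.auto_ml import AutoMlClient
from google.cloud.automl_v1beta1.services.auto_ml.transports import AutoMlGrpcTransport

def client_cert_source():
    # Return the client certificate and private key as PEM-encoded bytes.
    with open("client_cert.pem", "rb") as cert, open("client_key.pem", "rb") as key:
        return cert.read(), key.read()

transport = AutoMlGrpcTransport(
    host="automl.mtls.googleapis.com",
    client_cert_source_for_mtls=client_cert_source,
)
client = AutoMlClient(transport=transport)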
The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -199,18 +193,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -224,7 +208,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py index 4c8b2526..4f2781a0 100644 --- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py +++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py @@ -86,7 +86,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If @@ -126,6 +126,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -157,12 +158,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. 
Raises: @@ -171,72 +176,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -244,18 +238,8 @@ def __init__( ], ) - # Run the base constructor. 
- super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py index b1eadd8e..265c896a 100644 --- a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py +++ b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py @@ -85,7 +85,36 @@ class PredictionServiceAsyncClient: PredictionServiceClient.parse_common_location_path ) - from_service_account_file = PredictionServiceClient.from_service_account_file + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_info.__func__(PredictionServiceAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceAsyncClient: The constructed client. + """ + return PredictionServiceClient.from_service_account_file.__func__(PredictionServiceAsyncClient, filename, *args, **kwargs) # type: ignore + from_service_account_json = from_service_account_file @property @@ -183,24 +212,26 @@ async def predict( UTF-8 encoded. Args: - request (:class:`~.prediction_service.PredictRequest`): + request (:class:`google.cloud.automl_v1beta1.types.PredictRequest`): The request object. Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. name (:class:`str`): Required. Name of the model requested to serve the prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - payload (:class:`~.data_items.ExamplePayload`): + payload (:class:`google.cloud.automl_v1beta1.types.ExamplePayload`): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. + This corresponds to the ``payload`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`): + params (:class:`Sequence[google.cloud.automl_v1beta1.types.PredictRequest.ParamsEntry]`): Additional domain-specific parameters, any string must be up to 25000 characters long. @@ -223,6 +254,7 @@ async def predict( - For Tables: feature_importance - (boolean) Whether feature importance should be populated in the returned TablesAnnotation. The default is false. 
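A minimal sketch of the two constructors added above (the key file path and its contents are placeholders): ``from_service_account_file`` takes a path to a JSON key, while ``from_service_account_info`` takes the already-parsed dict.

import json

from google.cloud import automl_v1beta1
from google.cloud.automl_v1beta1.services.prediction_service import (
    PredictionServiceAsyncClient,
)

client = automl_v1beta1.PredictionServiceClient.from_service_account_file(
    "service-account.json"
)

with open("service-account.json") as f:
    info = json.load(f)
async_client = PredictionServiceAsyncClient.from_service_account_info(info)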
+ This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -234,7 +266,7 @@ async def predict( sent along with the request as metadata. Returns: - ~.prediction_service.PredictResponse: + google.cloud.automl_v1beta1.types.PredictResponse: Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. @@ -313,29 +345,32 @@ async def batch_predict( - Tables Args: - request (:class:`~.prediction_service.BatchPredictRequest`): + request (:class:`google.cloud.automl_v1beta1.types.BatchPredictRequest`): The request object. Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. name (:class:`str`): Required. Name of the model requested to serve the batch prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.BatchPredictInputConfig`): + input_config (:class:`google.cloud.automl_v1beta1.types.BatchPredictInputConfig`): Required. The input configuration for batch prediction. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.BatchPredictOutputConfig`): + output_config (:class:`google.cloud.automl_v1beta1.types.BatchPredictOutputConfig`): Required. The Configuration specifying where output predictions should be written. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`): + params (:class:`Sequence[google.cloud.automl_v1beta1.types.BatchPredictRequest.ParamsEntry]`): Required. Additional domain-specific parameters for the predictions, any string must be up to 25000 characters long. @@ -419,6 +454,7 @@ async def batch_predict( least that long as a relative value of video frame size will be returned. Value in 0 to 1 range. Default is 0. + This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -430,15 +466,13 @@ async def batch_predict( sent along with the request as metadata. Returns: - ~.operation_async.AsyncOperation: + google.api_core.operation_async.AsyncOperation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.prediction_service.BatchPredictResult``: - Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of the - operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + The result type for the operation will be :class:`google.cloud.automl_v1beta1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. """ # Create or coerce a protobuf request object. 
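For reference, a minimal sketch of an online ``predict`` call using the flattened ``payload`` and ``params`` arguments documented above (model name and content are placeholders; ``score_threshold`` applies to classification models):

from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()

payload = automl_v1beta1.ExamplePayload(
    text_snippet=automl_v1beta1.TextSnippet(
        content="example text to classify", mime_type="text/plain"
    )
)

response = client.predict(
    name="projects/my-project/locations/us-central1/models/TCN0000000000000000",
    payload=payload,
    params={"score_threshold": "0.5"},
)
for annotation in response.payload:
    print(annotation.display_name, annotation.classification.score)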
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/client.py b/google/cloud/automl_v1beta1/services/prediction_service/client.py index 7508e83a..a1e74ba4 100644 --- a/google/cloud/automl_v1beta1/services/prediction_service/client.py +++ b/google/cloud/automl_v1beta1/services/prediction_service/client.py @@ -121,6 +121,22 @@ def _get_default_mtls_endpoint(api_endpoint): DEFAULT_ENDPOINT ) + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PredictionServiceClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -133,7 +149,7 @@ def from_service_account_file(cls, filename: str, *args, **kwargs): kwargs: Additional arguments to pass to the constructor. Returns: - {@api.name}: The constructed client. + PredictionServiceClient: The constructed client. """ credentials = service_account.Credentials.from_service_account_file(filename) kwargs["credentials"] = credentials @@ -241,10 +257,10 @@ def __init__( credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. - transport (Union[str, ~.PredictionServiceTransport]): The + transport (Union[str, PredictionServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (client_options_lib.ClientOptions): Custom options for the + client_options (google.api_core.client_options.ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT @@ -280,21 +296,17 @@ def __init__( util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) ) - ssl_credentials = None + client_cert_source_func = None is_mtls = False if use_client_cert: if client_options.client_cert_source: - import grpc # type: ignore - - cert, key = client_options.client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) is_mtls = True + client_cert_source_func = client_options.client_cert_source else: - creds = SslCredentials() - is_mtls = creds.is_mtls - ssl_credentials = creds.ssl_credentials if is_mtls else None + is_mtls = mtls.has_default_client_cert_source() + client_cert_source_func = ( + mtls.default_client_cert_source() if is_mtls else None + ) # Figure out which api endpoint to use. if client_options.api_endpoint is not None: @@ -337,7 +349,7 @@ def __init__( credentials_file=client_options.credentials_file, host=api_endpoint, scopes=client_options.scopes, - ssl_channel_credentials=ssl_credentials, + client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, ) @@ -376,24 +388,26 @@ def predict( UTF-8 encoded. 
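With the change above, a certificate supplied through ``ClientOptions`` is forwarded to the transport as ``client_cert_source_for_mtls`` rather than being converted to channel credentials inside the client. A minimal sketch (hypothetical certificate paths; the callback is only consulted when ``GOOGLE_API_USE_CLIENT_CERTIFICATE`` is set to ``true``):

from google.api_core.client_options import ClientOptions
from google.cloud import automl_v1beta1

def client_cert_source():
    with open("client_cert.pem", "rb") as cert, open("client_key.pem", "rb") as key:
        return cert.read(), key.read()

options = ClientOptions(client_cert_source=client_cert_source)
client = automl_v1beta1.PredictionServiceClient(client_options=options)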
Args: - request (:class:`~.prediction_service.PredictRequest`): + request (google.cloud.automl_v1beta1.types.PredictRequest): The request object. Request message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. - name (:class:`str`): + name (str): Required. Name of the model requested to serve the prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - payload (:class:`~.data_items.ExamplePayload`): + payload (google.cloud.automl_v1beta1.types.ExamplePayload): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. + This corresponds to the ``payload`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.PredictRequest.ParamsEntry]`): + params (Sequence[google.cloud.automl_v1beta1.types.PredictRequest.ParamsEntry]): Additional domain-specific parameters, any string must be up to 25000 characters long. @@ -416,6 +430,7 @@ def predict( - For Tables: feature_importance - (boolean) Whether feature importance should be populated in the returned TablesAnnotation. The default is false. + This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -427,7 +442,7 @@ def predict( sent along with the request as metadata. Returns: - ~.prediction_service.PredictResponse: + google.cloud.automl_v1beta1.types.PredictResponse: Response message for [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. @@ -456,9 +471,8 @@ def predict( request.name = name if payload is not None: request.payload = payload - - if params: - request.params.update(params) + if params is not None: + request.params = params # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. @@ -507,29 +521,32 @@ def batch_predict( - Tables Args: - request (:class:`~.prediction_service.BatchPredictRequest`): + request (google.cloud.automl_v1beta1.types.BatchPredictRequest): The request object. Request message for [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. - name (:class:`str`): + name (str): Required. Name of the model requested to serve the batch prediction. + This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - input_config (:class:`~.io.BatchPredictInputConfig`): + input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): Required. The input configuration for batch prediction. + This corresponds to the ``input_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - output_config (:class:`~.io.BatchPredictOutputConfig`): + output_config (google.cloud.automl_v1beta1.types.BatchPredictOutputConfig): Required. The Configuration specifying where output predictions should be written. + This corresponds to the ``output_config`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - params (:class:`Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]`): + params (Sequence[google.cloud.automl_v1beta1.types.BatchPredictRequest.ParamsEntry]): Required. Additional domain-specific parameters for the predictions, any string must be up to 25000 characters long. 
@@ -613,6 +630,7 @@ def batch_predict( least that long as a relative value of video frame size will be returned. Value in 0 to 1 range. Default is 0. + This corresponds to the ``params`` field on the ``request`` instance; if ``request`` is provided, this should not be set. @@ -624,15 +642,13 @@ def batch_predict( sent along with the request as metadata. Returns: - ~.operation.Operation: + google.api_core.operation.Operation: An object representing a long-running operation. - The result type for the operation will be - :class:``~.prediction_service.BatchPredictResult``: - Result of the Batch Predict. This message is returned in - [response][google.longrunning.Operation.response] of the - operation returned by the - [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. + The result type for the operation will be :class:`google.cloud.automl_v1beta1.types.BatchPredictResult` Result of the Batch Predict. This message is returned in + [response][google.longrunning.Operation.response] of + the operation returned by the + [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. """ # Create or coerce a protobuf request object. @@ -661,9 +677,8 @@ def batch_predict( request.input_config = input_config if output_config is not None: request.output_config = output_config - - if params: - request.params.update(params) + if params is not None: + request.params = params # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py index 04857f4c..0efb4539 100644 --- a/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py +++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/base.py @@ -69,10 +69,10 @@ def __init__( scope (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. @@ -80,6 +80,9 @@ def __init__( host += ":443" self._host = host + # Save the scopes. + self._scopes = scopes or self.AUTH_SCOPES + # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: @@ -89,20 +92,17 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes, quota_project_id=quota_project_id + credentials_file, scopes=self._scopes, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = auth.default( - scopes=scopes, quota_project_id=quota_project_id + scopes=self._scopes, quota_project_id=quota_project_id ) # Save the credentials. self._credentials = credentials - # Lifted into its own function so it can be stubbed out during tests. 
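A minimal sketch of ``batch_predict`` with flattened GCS input and output configs (bucket and model names are placeholders); the call returns a long-running operation whose result is a ``BatchPredictResult``:

from google.cloud import automl_v1beta1

client = automl_v1beta1.PredictionServiceClient()

input_config = automl_v1beta1.BatchPredictInputConfig(
    gcs_source=automl_v1beta1.GcsSource(
        input_uris=["gs://my-bucket/batch-input.csv"]
    )
)
output_config = automl_v1beta1.BatchPredictOutputConfig(
    gcs_destination=automl_v1beta1.GcsDestination(
        output_uri_prefix="gs://my-bucket/batch-output/"
    )
)

operation = client.batch_predict(
    name="projects/my-project/locations/us-central1/models/TBL0000000000000000",
    input_config=input_config,
    output_config=output_config,
    params={},
)
result = operation.result()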
- self._prep_wrapped_messages(client_info) - def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py index 7db54b76..5074f5b2 100644 --- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py +++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py @@ -62,6 +62,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -92,6 +93,10 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): @@ -106,72 +111,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -179,18 +173,8 @@ def __init__( ], ) - self._stubs = {} # type: Dict[str, Callable] - self._operations_client = None - - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @classmethod def create_channel( @@ -204,7 +188,7 @@ def create_channel( ) -> grpc.Channel: """Create and return a gRPC channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. If diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py index 7f1c477c..dfd52483 100644 --- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py +++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py @@ -66,7 +66,7 @@ def create_channel( ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: - address (Optional[str]): The host for the channel to use. + host (Optional[str]): The host for the channel to use. credentials (Optional[~.Credentials]): The authorization credentials to attach to requests. These credentials identify this application to the service. 
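A hedged sketch of the new ``client_cert_source_for_mtls`` callback on the gRPC transport shown above; the host and certificate paths are placeholders, and application default credentials are assumed to be available. The callback is only consulted when neither ``channel`` nor ``ssl_channel_credentials`` is supplied, matching the branch added in this diff:

from google.cloud.automl_v1beta1.services.prediction_service.transports import (
    PredictionServiceGrpcTransport,
)

def load_client_cert():
    # Return (certificate_chain, private_key) as PEM-encoded bytes.
    with open("client_cert.pem", "rb") as cert_file:
        cert = cert_file.read()
    with open("client_key.pem", "rb") as key_file:
        key = key_file.read()
    return cert, key

transport = PredictionServiceGrpcTransport(
    host="automl.mtls.googleapis.com",
    client_cert_source_for_mtls=load_client_cert,
)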
If @@ -106,6 +106,7 @@ def __init__( api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id=None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: @@ -137,12 +138,16 @@ def __init__( ``api_mtls_endpoint`` is None. ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. quota_project_id (Optional[str]): An optional project to use for billing and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing your own client library. Raises: @@ -151,72 +156,61 @@ def __init__( google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` and ``credentials_file`` are passed. """ + self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) if channel: - # Sanity check: Ensure that channel and credentials are not both - # provided. + # Ignore credentials if a channel was passed. credentials = False - # If a channel was explicitly provided, set it. self._grpc_channel = channel self._ssl_channel_credentials = None - elif api_mtls_endpoint: - warnings.warn( - "api_mtls_endpoint and client_cert_source are deprecated", - DeprecationWarning, - ) - host = ( - api_mtls_endpoint - if ":" in api_mtls_endpoint - else api_mtls_endpoint + ":443" - ) + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - ssl_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) else: - ssl_credentials = SslCredentials().ssl_credentials + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) - # create a new channel. 
The provided one is ignored. - self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - ssl_credentials=ssl_credentials, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - self._ssl_channel_credentials = ssl_credentials - else: - host = host if ":" in host else host + ":443" - - if credentials is None: - credentials, _ = auth.default( - scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id - ) + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + ) - # create a new channel. The provided one is ignored. + if not self._grpc_channel: self._grpc_channel = type(self).create_channel( - host, - credentials=credentials, + self._host, + credentials=self._credentials, credentials_file=credentials_file, - ssl_credentials=ssl_channel_credentials, - scopes=scopes or self.AUTH_SCOPES, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, quota_project_id=quota_project_id, options=[ ("grpc.max_send_message_length", -1), @@ -224,18 +218,8 @@ def __init__( ], ) - # Run the base constructor. - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes or self.AUTH_SCOPES, - quota_project_id=quota_project_id, - client_info=client_info, - ) - - self._stubs = {} - self._operations_client = None + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) @property def grpc_channel(self) -> aio.Channel: diff --git a/google/cloud/automl_v1beta1/types/__init__.py b/google/cloud/automl_v1beta1/types/__init__.py index 0dde57f5..1072fb32 100644 --- a/google/cloud/automl_v1beta1/types/__init__.py +++ b/google/cloud/automl_v1beta1/types/__init__.py @@ -15,284 +15,284 @@ # limitations under the License. 
# -from .temporal import TimeSegment +from .annotation_payload import AnnotationPayload +from .annotation_spec import AnnotationSpec from .classification import ( ClassificationAnnotation, - VideoClassificationAnnotation, ClassificationEvaluationMetrics, + VideoClassificationAnnotation, ClassificationType, ) -from .geometry import ( - NormalizedVertex, - BoundingPoly, -) -from .detection import ( - ImageObjectDetectionAnnotation, - VideoObjectTrackingAnnotation, - BoundingBoxMetricsEntry, - ImageObjectDetectionEvaluationMetrics, - VideoObjectTrackingEvaluationMetrics, +from .column_spec import ColumnSpec +from .data_items import ( + Document, + DocumentDimensions, + ExamplePayload, + Image, + Row, + TextSnippet, ) from .data_stats import ( + ArrayStats, + CategoryStats, + CorrelationStats, DataStats, Float64Stats, StringStats, - TimestampStats, - ArrayStats, StructStats, - CategoryStats, - CorrelationStats, + TimestampStats, ) from .data_types import ( DataType, StructType, TypeCode, ) -from .column_spec import ColumnSpec -from .io import ( - InputConfig, - BatchPredictInputConfig, - DocumentInputConfig, - OutputConfig, - BatchPredictOutputConfig, - ModelExportOutputConfig, - ExportEvaluatedExamplesOutputConfig, - GcsSource, - BigQuerySource, - GcsDestination, - BigQueryDestination, - GcrDestination, -) -from .text_segment import TextSegment -from .data_items import ( - Image, - TextSnippet, - DocumentDimensions, - Document, - Row, - ExamplePayload, -) -from .ranges import DoubleRange -from .regression import RegressionEvaluationMetrics -from .tables import ( - TablesDatasetMetadata, - TablesModelMetadata, - TablesAnnotation, - TablesModelColumnInfo, -) -from .text_extraction import ( - TextExtractionAnnotation, - TextExtractionEvaluationMetrics, -) -from .text_sentiment import ( - TextSentimentAnnotation, - TextSentimentEvaluationMetrics, +from .dataset import Dataset +from .detection import ( + BoundingBoxMetricsEntry, + ImageObjectDetectionAnnotation, + ImageObjectDetectionEvaluationMetrics, + VideoObjectTrackingAnnotation, + VideoObjectTrackingEvaluationMetrics, ) -from .translation import ( - TranslationDatasetMetadata, - TranslationEvaluationMetrics, - TranslationModelMetadata, - TranslationAnnotation, +from .geometry import ( + BoundingPoly, + NormalizedVertex, ) -from .annotation_payload import AnnotationPayload -from .annotation_spec import AnnotationSpec from .image import ( ImageClassificationDatasetMetadata, - ImageObjectDetectionDatasetMetadata, - ImageClassificationModelMetadata, - ImageObjectDetectionModelMetadata, ImageClassificationModelDeploymentMetadata, + ImageClassificationModelMetadata, + ImageObjectDetectionDatasetMetadata, ImageObjectDetectionModelDeploymentMetadata, + ImageObjectDetectionModelMetadata, ) -from .text import ( - TextClassificationDatasetMetadata, - TextClassificationModelMetadata, - TextExtractionDatasetMetadata, - TextExtractionModelMetadata, - TextSentimentDatasetMetadata, - TextSentimentModelMetadata, -) -from .video import ( - VideoClassificationDatasetMetadata, - VideoObjectTrackingDatasetMetadata, - VideoClassificationModelMetadata, - VideoObjectTrackingModelMetadata, +from .io import ( + BatchPredictInputConfig, + BatchPredictOutputConfig, + BigQueryDestination, + BigQuerySource, + DocumentInputConfig, + ExportEvaluatedExamplesOutputConfig, + GcrDestination, + GcsDestination, + GcsSource, + InputConfig, + ModelExportOutputConfig, + OutputConfig, ) -from .dataset import Dataset from .model import Model from .model_evaluation import 
ModelEvaluation from .operations import ( - OperationMetadata, + BatchPredictOperationMetadata, + CreateModelOperationMetadata, DeleteOperationMetadata, DeployModelOperationMetadata, - UndeployModelOperationMetadata, - CreateModelOperationMetadata, - ImportDataOperationMetadata, ExportDataOperationMetadata, - BatchPredictOperationMetadata, - ExportModelOperationMetadata, ExportEvaluatedExamplesOperationMetadata, + ExportModelOperationMetadata, + ImportDataOperationMetadata, + OperationMetadata, + UndeployModelOperationMetadata, ) from .prediction_service import ( - PredictRequest, - PredictResponse, BatchPredictRequest, BatchPredictResult, + PredictRequest, + PredictResponse, ) -from .table_spec import TableSpec +from .ranges import DoubleRange +from .regression import RegressionEvaluationMetrics from .service import ( CreateDatasetRequest, - GetDatasetRequest, - ListDatasetsRequest, - ListDatasetsResponse, - UpdateDatasetRequest, + CreateModelRequest, DeleteDatasetRequest, - ImportDataRequest, + DeleteModelRequest, + DeployModelRequest, ExportDataRequest, + ExportEvaluatedExamplesRequest, + ExportModelRequest, GetAnnotationSpecRequest, - GetTableSpecRequest, - ListTableSpecsRequest, - ListTableSpecsResponse, - UpdateTableSpecRequest, GetColumnSpecRequest, + GetDatasetRequest, + GetModelEvaluationRequest, + GetModelRequest, + GetTableSpecRequest, + ImportDataRequest, ListColumnSpecsRequest, ListColumnSpecsResponse, - UpdateColumnSpecRequest, - CreateModelRequest, - GetModelRequest, + ListDatasetsRequest, + ListDatasetsResponse, + ListModelEvaluationsRequest, + ListModelEvaluationsResponse, ListModelsRequest, ListModelsResponse, - DeleteModelRequest, - DeployModelRequest, + ListTableSpecsRequest, + ListTableSpecsResponse, UndeployModelRequest, - ExportModelRequest, - ExportEvaluatedExamplesRequest, - GetModelEvaluationRequest, - ListModelEvaluationsRequest, - ListModelEvaluationsResponse, + UpdateColumnSpecRequest, + UpdateDatasetRequest, + UpdateTableSpecRequest, +) +from .table_spec import TableSpec +from .tables import ( + TablesAnnotation, + TablesDatasetMetadata, + TablesModelColumnInfo, + TablesModelMetadata, +) +from .temporal import TimeSegment +from .text import ( + TextClassificationDatasetMetadata, + TextClassificationModelMetadata, + TextExtractionDatasetMetadata, + TextExtractionModelMetadata, + TextSentimentDatasetMetadata, + TextSentimentModelMetadata, +) +from .text_extraction import ( + TextExtractionAnnotation, + TextExtractionEvaluationMetrics, +) +from .text_segment import TextSegment +from .text_sentiment import ( + TextSentimentAnnotation, + TextSentimentEvaluationMetrics, +) +from .translation import ( + TranslationAnnotation, + TranslationDatasetMetadata, + TranslationEvaluationMetrics, + TranslationModelMetadata, +) +from .video import ( + VideoClassificationDatasetMetadata, + VideoClassificationModelMetadata, + VideoObjectTrackingDatasetMetadata, + VideoObjectTrackingModelMetadata, ) __all__ = ( - "TimeSegment", + "AnnotationPayload", + "AnnotationSpec", "ClassificationAnnotation", - "VideoClassificationAnnotation", "ClassificationEvaluationMetrics", + "VideoClassificationAnnotation", "ClassificationType", - "NormalizedVertex", - "BoundingPoly", - "ImageObjectDetectionAnnotation", - "VideoObjectTrackingAnnotation", - "BoundingBoxMetricsEntry", - "ImageObjectDetectionEvaluationMetrics", - "VideoObjectTrackingEvaluationMetrics", + "ColumnSpec", + "Document", + "DocumentDimensions", + "ExamplePayload", + "Image", + "Row", + "TextSnippet", + "ArrayStats", + 
"CategoryStats", + "CorrelationStats", "DataStats", "Float64Stats", "StringStats", - "TimestampStats", - "ArrayStats", "StructStats", - "CategoryStats", - "CorrelationStats", + "TimestampStats", "DataType", "StructType", "TypeCode", - "ColumnSpec", - "InputConfig", + "Dataset", + "BoundingBoxMetricsEntry", + "ImageObjectDetectionAnnotation", + "ImageObjectDetectionEvaluationMetrics", + "VideoObjectTrackingAnnotation", + "VideoObjectTrackingEvaluationMetrics", + "BoundingPoly", + "NormalizedVertex", + "ImageClassificationDatasetMetadata", + "ImageClassificationModelDeploymentMetadata", + "ImageClassificationModelMetadata", + "ImageObjectDetectionDatasetMetadata", + "ImageObjectDetectionModelDeploymentMetadata", + "ImageObjectDetectionModelMetadata", "BatchPredictInputConfig", - "DocumentInputConfig", - "OutputConfig", "BatchPredictOutputConfig", - "ModelExportOutputConfig", - "ExportEvaluatedExamplesOutputConfig", - "GcsSource", - "BigQuerySource", - "GcsDestination", "BigQueryDestination", + "BigQuerySource", + "DocumentInputConfig", + "ExportEvaluatedExamplesOutputConfig", "GcrDestination", - "TextSegment", - "Image", - "TextSnippet", - "DocumentDimensions", - "Document", - "Row", - "ExamplePayload", - "DoubleRange", - "RegressionEvaluationMetrics", - "TablesDatasetMetadata", - "TablesModelMetadata", - "TablesAnnotation", - "TablesModelColumnInfo", - "TextExtractionAnnotation", - "TextExtractionEvaluationMetrics", - "TextSentimentAnnotation", - "TextSentimentEvaluationMetrics", - "TranslationDatasetMetadata", - "TranslationEvaluationMetrics", - "TranslationModelMetadata", - "TranslationAnnotation", - "AnnotationPayload", - "AnnotationSpec", - "ImageClassificationDatasetMetadata", - "ImageObjectDetectionDatasetMetadata", - "ImageClassificationModelMetadata", - "ImageObjectDetectionModelMetadata", - "ImageClassificationModelDeploymentMetadata", - "ImageObjectDetectionModelDeploymentMetadata", - "TextClassificationDatasetMetadata", - "TextClassificationModelMetadata", - "TextExtractionDatasetMetadata", - "TextExtractionModelMetadata", - "TextSentimentDatasetMetadata", - "TextSentimentModelMetadata", - "VideoClassificationDatasetMetadata", - "VideoObjectTrackingDatasetMetadata", - "VideoClassificationModelMetadata", - "VideoObjectTrackingModelMetadata", - "Dataset", + "GcsDestination", + "GcsSource", + "InputConfig", + "ModelExportOutputConfig", + "OutputConfig", "Model", "ModelEvaluation", - "OperationMetadata", + "BatchPredictOperationMetadata", + "CreateModelOperationMetadata", "DeleteOperationMetadata", "DeployModelOperationMetadata", - "UndeployModelOperationMetadata", - "CreateModelOperationMetadata", - "ImportDataOperationMetadata", "ExportDataOperationMetadata", - "BatchPredictOperationMetadata", - "ExportModelOperationMetadata", "ExportEvaluatedExamplesOperationMetadata", - "PredictRequest", - "PredictResponse", + "ExportModelOperationMetadata", + "ImportDataOperationMetadata", + "OperationMetadata", + "UndeployModelOperationMetadata", "BatchPredictRequest", "BatchPredictResult", - "TableSpec", + "PredictRequest", + "PredictResponse", + "DoubleRange", + "RegressionEvaluationMetrics", "CreateDatasetRequest", - "GetDatasetRequest", - "ListDatasetsRequest", - "ListDatasetsResponse", - "UpdateDatasetRequest", + "CreateModelRequest", "DeleteDatasetRequest", - "ImportDataRequest", + "DeleteModelRequest", + "DeployModelRequest", "ExportDataRequest", + "ExportEvaluatedExamplesRequest", + "ExportModelRequest", "GetAnnotationSpecRequest", - "GetTableSpecRequest", - "ListTableSpecsRequest", - 
"ListTableSpecsResponse", - "UpdateTableSpecRequest", "GetColumnSpecRequest", + "GetDatasetRequest", + "GetModelEvaluationRequest", + "GetModelRequest", + "GetTableSpecRequest", + "ImportDataRequest", "ListColumnSpecsRequest", "ListColumnSpecsResponse", - "UpdateColumnSpecRequest", - "CreateModelRequest", - "GetModelRequest", + "ListDatasetsRequest", + "ListDatasetsResponse", + "ListModelEvaluationsRequest", + "ListModelEvaluationsResponse", "ListModelsRequest", "ListModelsResponse", - "DeleteModelRequest", - "DeployModelRequest", + "ListTableSpecsRequest", + "ListTableSpecsResponse", "UndeployModelRequest", - "ExportModelRequest", - "ExportEvaluatedExamplesRequest", - "GetModelEvaluationRequest", - "ListModelEvaluationsRequest", - "ListModelEvaluationsResponse", + "UpdateColumnSpecRequest", + "UpdateDatasetRequest", + "UpdateTableSpecRequest", + "TableSpec", + "TablesAnnotation", + "TablesDatasetMetadata", + "TablesModelColumnInfo", + "TablesModelMetadata", + "TimeSegment", + "TextClassificationDatasetMetadata", + "TextClassificationModelMetadata", + "TextExtractionDatasetMetadata", + "TextExtractionModelMetadata", + "TextSentimentDatasetMetadata", + "TextSentimentModelMetadata", + "TextExtractionAnnotation", + "TextExtractionEvaluationMetrics", + "TextSegment", + "TextSentimentAnnotation", + "TextSentimentEvaluationMetrics", + "TranslationAnnotation", + "TranslationDatasetMetadata", + "TranslationEvaluationMetrics", + "TranslationModelMetadata", + "VideoClassificationDatasetMetadata", + "VideoClassificationModelMetadata", + "VideoObjectTrackingDatasetMetadata", + "VideoObjectTrackingModelMetadata", ) diff --git a/google/cloud/automl_v1beta1/types/annotation_payload.py b/google/cloud/automl_v1beta1/types/annotation_payload.py index 5e5d09cd..84bae153 100644 --- a/google/cloud/automl_v1beta1/types/annotation_payload.py +++ b/google/cloud/automl_v1beta1/types/annotation_payload.py @@ -35,24 +35,24 @@ class AnnotationPayload(proto.Message): r"""Contains annotation information that is relevant to AutoML. Attributes: - translation (~.gca_translation.TranslationAnnotation): + translation (google.cloud.automl_v1beta1.types.TranslationAnnotation): Annotation details for translation. - classification (~.gca_classification.ClassificationAnnotation): + classification (google.cloud.automl_v1beta1.types.ClassificationAnnotation): Annotation details for content or image classification. - image_object_detection (~.detection.ImageObjectDetectionAnnotation): + image_object_detection (google.cloud.automl_v1beta1.types.ImageObjectDetectionAnnotation): Annotation details for image object detection. - video_classification (~.gca_classification.VideoClassificationAnnotation): + video_classification (google.cloud.automl_v1beta1.types.VideoClassificationAnnotation): Annotation details for video classification. Returned for Video Classification predictions. - video_object_tracking (~.detection.VideoObjectTrackingAnnotation): + video_object_tracking (google.cloud.automl_v1beta1.types.VideoObjectTrackingAnnotation): Annotation details for video object tracking. - text_extraction (~.gca_text_extraction.TextExtractionAnnotation): + text_extraction (google.cloud.automl_v1beta1.types.TextExtractionAnnotation): Annotation details for text extraction. - text_sentiment (~.gca_text_sentiment.TextSentimentAnnotation): + text_sentiment (google.cloud.automl_v1beta1.types.TextSentimentAnnotation): Annotation details for text sentiment. 
- tables (~.gca_tables.TablesAnnotation): + tables (google.cloud.automl_v1beta1.types.TablesAnnotation): Annotation details for Tables. annotation_spec_id (str): Output only . The resource ID of the diff --git a/google/cloud/automl_v1beta1/types/classification.py b/google/cloud/automl_v1beta1/types/classification.py index b9f21ba3..20f3b4a6 100644 --- a/google/cloud/automl_v1beta1/types/classification.py +++ b/google/cloud/automl_v1beta1/types/classification.py @@ -88,10 +88,10 @@ class VideoClassificationAnnotation(proto.Message): done for this classification type, the quality of it depends on training data, but there are no metrics provided to describe that quality. - classification_annotation (~.classification.ClassificationAnnotation): + classification_annotation (google.cloud.automl_v1beta1.types.ClassificationAnnotation): Output only . The classification details of this annotation. - time_segment (~.temporal.TimeSegment): + time_segment (google.cloud.automl_v1beta1.types.TimeSegment): Output only . The time segment of the video to which the annotation applies. """ @@ -125,7 +125,7 @@ class ClassificationEvaluationMetrics(proto.Message): averaged for the overall evaluation. log_loss (float): Output only. The Log Loss metric. - confidence_metrics_entry (Sequence[~.classification.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): + confidence_metrics_entry (Sequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfidenceMetricsEntry]): Output only. Metrics for each confidence_threshold in 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and position_threshold = INT32_MAX_VALUE. ROC and @@ -133,7 +133,7 @@ class ClassificationEvaluationMetrics(proto.Message): derived from them. The confidence metrics entries may also be supplied for additional values of position_threshold, but from these no aggregated metrics are computed. - confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix): + confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix): Output only. Confusion matrix of the evaluation. Only set for MULTICLASS classification problems where number of labels @@ -253,7 +253,7 @@ class ConfusionMatrix(proto.Message): [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type], distinct values of the target column at the moment of the model evaluation are populated here. - row (Sequence[~.classification.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): + row (Sequence[google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix.Row]): Output only. Rows in the confusion matrix. The number of rows is equal to the size of ``annotation_spec_id``. ``row[i].example_count[j]`` is the number of examples that diff --git a/google/cloud/automl_v1beta1/types/column_spec.py b/google/cloud/automl_v1beta1/types/column_spec.py index 3aa86c4b..40d6976f 100644 --- a/google/cloud/automl_v1beta1/types/column_spec.py +++ b/google/cloud/automl_v1beta1/types/column_spec.py @@ -39,7 +39,7 @@ class ColumnSpec(proto.Message): Output only. The resource name of the column specs. Form: ``projects/{project_id}/locations/{location_id}/datasets/{dataset_id}/tableSpecs/{table_spec_id}/columnSpecs/{column_spec_id}`` - data_type (~.data_types.DataType): + data_type (google.cloud.automl_v1beta1.types.DataType): The data type of elements stored in the column. 
display_name (str): @@ -48,12 +48,12 @@ class ColumnSpec(proto.Message): consist only of ASCII Latin letters A-Z and a-z, ASCII digits 0-9, underscores(_), and forward slashes(/), and must start with a letter or a digit. - data_stats (~.gca_data_stats.DataStats): + data_stats (google.cloud.automl_v1beta1.types.DataStats): Output only. Stats of the series of values in the column. This field may be stale, see the ancestor's Dataset.tables_dataset_metadata.stats_update_time field for the timestamp at which these stats were last updated. - top_correlated_columns (Sequence[~.column_spec.ColumnSpec.CorrelatedColumn]): + top_correlated_columns (Sequence[google.cloud.automl_v1beta1.types.ColumnSpec.CorrelatedColumn]): Deprecated. etag (str): Used to perform consistent read-modify-write @@ -69,7 +69,7 @@ class CorrelatedColumn(proto.Message): column_spec_id (str): The column_spec_id of the correlated column, which belongs to the same table as the in-context column. - correlation_stats (~.gca_data_stats.CorrelationStats): + correlation_stats (google.cloud.automl_v1beta1.types.CorrelationStats): Correlation between this and the in-context column. """ diff --git a/google/cloud/automl_v1beta1/types/data_items.py b/google/cloud/automl_v1beta1/types/data_items.py index eff58bee..098ad147 100644 --- a/google/cloud/automl_v1beta1/types/data_items.py +++ b/google/cloud/automl_v1beta1/types/data_items.py @@ -46,7 +46,7 @@ class Image(proto.Message): Image content represented as a stream of bytes. Note: As with all ``bytes`` fields, protobuffers use a pure binary representation, whereas JSON representations use base64. - input_config (~.io.InputConfig): + input_config (google.cloud.automl_v1beta1.types.InputConfig): An input config specifying the content of the image. thumbnail_uri (str): @@ -92,7 +92,7 @@ class DocumentDimensions(proto.Message): r"""Message that describes dimension of a document. Attributes: - unit (~.data_items.DocumentDimensions.DocumentDimensionUnit): + unit (google.cloud.automl_v1beta1.types.DocumentDimensions.DocumentDimensionUnit): Unit of the dimension. width (float): Width value of the document, works together @@ -120,15 +120,15 @@ class Document(proto.Message): r"""A structured text document e.g. a PDF. Attributes: - input_config (~.io.DocumentInputConfig): + input_config (google.cloud.automl_v1beta1.types.DocumentInputConfig): An input config specifying the content of the document. - document_text (~.data_items.TextSnippet): + document_text (google.cloud.automl_v1beta1.types.TextSnippet): The plain text version of this document. - layout (Sequence[~.data_items.Document.Layout]): + layout (Sequence[google.cloud.automl_v1beta1.types.Document.Layout]): Describes the layout of the document. Sorted by [page_number][]. - document_dimensions (~.data_items.DocumentDimensions): + document_dimensions (google.cloud.automl_v1beta1.types.DocumentDimensions): The dimensions of the page in the document. page_count (int): Number of pages in the document. @@ -140,14 +140,14 @@ class Layout(proto.Message): in the document. Attributes: - text_segment (~.gca_text_segment.TextSegment): + text_segment (google.cloud.automl_v1beta1.types.TextSegment): Text Segment that represents a segment in [document_text][google.cloud.automl.v1beta1.Document.document_text]. page_number (int): Page number of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the original document, starts from 1. 
- bounding_poly (~.geometry.BoundingPoly): + bounding_poly (google.cloud.automl_v1beta1.types.BoundingPoly): The position of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in the page. Contains exactly 4 @@ -158,7 +158,7 @@ class Layout(proto.Message): [NormalizedVertex-s][google.cloud.automl.v1beta1.NormalizedVertex] are relative to the page. Coordinates are based on top-left as point (0,0). - text_segment_type (~.data_items.Document.Layout.TextSegmentType): + text_segment_type (google.cloud.automl_v1beta1.types.Document.Layout.TextSegmentType): The type of the [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in document. @@ -219,7 +219,7 @@ class Row(proto.Message): of the Model this row is being passed to. Note: The below ``values`` field must match order of this field, if this field is set. - values (Sequence[~.struct.Value]): + values (Sequence[google.protobuf.struct_pb2.Value]): Required. The values of the row cells, given in the same order as the column_spec_ids, or, if not set, then in the same order as input feature @@ -237,13 +237,13 @@ class ExamplePayload(proto.Message): r"""Example data used for training or prediction. Attributes: - image (~.data_items.Image): + image (google.cloud.automl_v1beta1.types.Image): Example image. - text_snippet (~.data_items.TextSnippet): + text_snippet (google.cloud.automl_v1beta1.types.TextSnippet): Example text. - document (~.data_items.Document): + document (google.cloud.automl_v1beta1.types.Document): Example document. - row (~.data_items.Row): + row (google.cloud.automl_v1beta1.types.Row): Example relational table row. """ diff --git a/google/cloud/automl_v1beta1/types/data_stats.py b/google/cloud/automl_v1beta1/types/data_stats.py index 75a9dc38..e6c60cd0 100644 --- a/google/cloud/automl_v1beta1/types/data_stats.py +++ b/google/cloud/automl_v1beta1/types/data_stats.py @@ -38,17 +38,17 @@ class DataStats(proto.Message): DataType. Attributes: - float64_stats (~.data_stats.Float64Stats): + float64_stats (google.cloud.automl_v1beta1.types.Float64Stats): The statistics for FLOAT64 DataType. - string_stats (~.data_stats.StringStats): + string_stats (google.cloud.automl_v1beta1.types.StringStats): The statistics for STRING DataType. - timestamp_stats (~.data_stats.TimestampStats): + timestamp_stats (google.cloud.automl_v1beta1.types.TimestampStats): The statistics for TIMESTAMP DataType. - array_stats (~.data_stats.ArrayStats): + array_stats (google.cloud.automl_v1beta1.types.ArrayStats): The statistics for ARRAY DataType. - struct_stats (~.data_stats.StructStats): + struct_stats (google.cloud.automl_v1beta1.types.StructStats): The statistics for STRUCT DataType. - category_stats (~.data_stats.CategoryStats): + category_stats (google.cloud.automl_v1beta1.types.CategoryStats): The statistics for CATEGORY DataType. distinct_value_count (int): The number of distinct values. @@ -102,7 +102,7 @@ class Float64Stats(proto.Message): n values. The value at index i is, approximately, the i*n/k-th smallest value in the series; for i = 0 and i = k these are, respectively, the min and max values. - histogram_buckets (Sequence[~.data_stats.Float64Stats.HistogramBucket]): + histogram_buckets (Sequence[google.cloud.automl_v1beta1.types.Float64Stats.HistogramBucket]): Histogram buckets of the data series. Sorted by the min value of the bucket, ascendingly, and the number of the buckets is dynamically generated. 
The buckets are @@ -146,7 +146,7 @@ class StringStats(proto.Message): r"""The data statistics of a series of STRING values. Attributes: - top_unigram_stats (Sequence[~.data_stats.StringStats.UnigramStats]): + top_unigram_stats (Sequence[google.cloud.automl_v1beta1.types.StringStats.UnigramStats]): The statistics of the top 20 unigrams, ordered by [count][google.cloud.automl.v1beta1.StringStats.UnigramStats.count]. """ @@ -175,7 +175,7 @@ class TimestampStats(proto.Message): r"""The data statistics of a series of TIMESTAMP values. Attributes: - granular_stats (Sequence[~.data_stats.TimestampStats.GranularStatsEntry]): + granular_stats (Sequence[google.cloud.automl_v1beta1.types.TimestampStats.GranularStatsEntry]): The string key is the pre-defined granularity. Currently supported: hour_of_day, day_of_week, month_of_year. Granularities finer that the granularity of timestamp data @@ -187,7 +187,7 @@ class GranularStats(proto.Message): r"""Stats split by a defined in context granularity. Attributes: - buckets (Sequence[~.data_stats.TimestampStats.GranularStats.BucketsEntry]): + buckets (Sequence[google.cloud.automl_v1beta1.types.TimestampStats.GranularStats.BucketsEntry]): A map from granularity key to example count for that key. E.g. for hour_of_day ``13`` means 1pm, or for month_of_year ``5`` means May). @@ -204,7 +204,7 @@ class ArrayStats(proto.Message): r"""The data statistics of a series of ARRAY values. Attributes: - member_stats (~.data_stats.DataStats): + member_stats (google.cloud.automl_v1beta1.types.DataStats): Stats of all the values of all arrays, as if they were a single long series of data. The type depends on the element type of the array. @@ -217,7 +217,7 @@ class StructStats(proto.Message): r"""The data statistics of a series of STRUCT values. Attributes: - field_stats (Sequence[~.data_stats.StructStats.FieldStatsEntry]): + field_stats (Sequence[google.cloud.automl_v1beta1.types.StructStats.FieldStatsEntry]): Map from a field name of the struct to data stats aggregated over series of all data in that field across all the structs. @@ -232,7 +232,7 @@ class CategoryStats(proto.Message): r"""The data statistics of a series of CATEGORY values. Attributes: - top_category_stats (Sequence[~.data_stats.CategoryStats.SingleCategoryStats]): + top_category_stats (Sequence[google.cloud.automl_v1beta1.types.CategoryStats.SingleCategoryStats]): The statistics of the top 20 CATEGORY values, ordered by [count][google.cloud.automl.v1beta1.CategoryStats.SingleCategoryStats.count]. diff --git a/google/cloud/automl_v1beta1/types/data_types.py b/google/cloud/automl_v1beta1/types/data_types.py index 6faa598b..9b335aeb 100644 --- a/google/cloud/automl_v1beta1/types/data_types.py +++ b/google/cloud/automl_v1beta1/types/data_types.py @@ -42,12 +42,12 @@ class DataType(proto.Message): data entity (e.g. a table). Attributes: - list_element_type (~.data_types.DataType): + list_element_type (google.cloud.automl_v1beta1.types.DataType): If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [ARRAY][google.cloud.automl.v1beta1.TypeCode.ARRAY], then ``list_element_type`` is the type of the elements. - struct_type (~.data_types.StructType): + struct_type (google.cloud.automl_v1beta1.types.StructType): If [type_code][google.cloud.automl.v1beta1.DataType.type_code] == [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT], @@ -69,7 +69,7 @@ class DataType(proto.Message): the Unix epoch); or be written in ``strftime`` syntax. 
If time_format is not set, then the default format as described on the type_code is used. - type_code (~.data_types.TypeCode): + type_code (google.cloud.automl_v1beta1.types.TypeCode): Required. The [TypeCode][google.cloud.automl.v1beta1.TypeCode] for this type. @@ -98,7 +98,7 @@ class StructType(proto.Message): [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type. Attributes: - fields (Sequence[~.data_types.StructType.FieldsEntry]): + fields (Sequence[google.cloud.automl_v1beta1.types.StructType.FieldsEntry]): Unordered map of struct field names to their data types. Fields cannot be added or removed via Update. Their names and data types are still diff --git a/google/cloud/automl_v1beta1/types/dataset.py b/google/cloud/automl_v1beta1/types/dataset.py index ef31c859..438dc794 100644 --- a/google/cloud/automl_v1beta1/types/dataset.py +++ b/google/cloud/automl_v1beta1/types/dataset.py @@ -37,30 +37,30 @@ class Dataset(proto.Message): annotated. Attributes: - translation_dataset_metadata (~.translation.TranslationDatasetMetadata): + translation_dataset_metadata (google.cloud.automl_v1beta1.types.TranslationDatasetMetadata): Metadata for a dataset used for translation. - image_classification_dataset_metadata (~.image.ImageClassificationDatasetMetadata): + image_classification_dataset_metadata (google.cloud.automl_v1beta1.types.ImageClassificationDatasetMetadata): Metadata for a dataset used for image classification. - text_classification_dataset_metadata (~.text.TextClassificationDatasetMetadata): + text_classification_dataset_metadata (google.cloud.automl_v1beta1.types.TextClassificationDatasetMetadata): Metadata for a dataset used for text classification. - image_object_detection_dataset_metadata (~.image.ImageObjectDetectionDatasetMetadata): + image_object_detection_dataset_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionDatasetMetadata): Metadata for a dataset used for image object detection. - video_classification_dataset_metadata (~.video.VideoClassificationDatasetMetadata): + video_classification_dataset_metadata (google.cloud.automl_v1beta1.types.VideoClassificationDatasetMetadata): Metadata for a dataset used for video classification. - video_object_tracking_dataset_metadata (~.video.VideoObjectTrackingDatasetMetadata): + video_object_tracking_dataset_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingDatasetMetadata): Metadata for a dataset used for video object tracking. - text_extraction_dataset_metadata (~.text.TextExtractionDatasetMetadata): + text_extraction_dataset_metadata (google.cloud.automl_v1beta1.types.TextExtractionDatasetMetadata): Metadata for a dataset used for text extraction. - text_sentiment_dataset_metadata (~.text.TextSentimentDatasetMetadata): + text_sentiment_dataset_metadata (google.cloud.automl_v1beta1.types.TextSentimentDatasetMetadata): Metadata for a dataset used for text sentiment. - tables_dataset_metadata (~.tables.TablesDatasetMetadata): + tables_dataset_metadata (google.cloud.automl_v1beta1.types.TablesDatasetMetadata): Metadata for a dataset used for Tables. name (str): Output only. The resource name of the dataset. Form: @@ -76,7 +76,7 @@ class Dataset(proto.Message): example_count (int): Output only. The number of examples in the dataset. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this dataset was created. 
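As a worked example of the dataset metadata fields documented above: exactly one of the ``*_dataset_metadata`` fields is set (they form a oneof in the proto, assumed here to be named ``dataset_metadata``), and it fixes the dataset's problem type. Names are placeholders:

from google.cloud import automl_v1beta1

dataset = automl_v1beta1.Dataset(
    display_name="my_text_dataset",
    text_classification_dataset_metadata=automl_v1beta1.TextClassificationDatasetMetadata(
        classification_type=automl_v1beta1.ClassificationType.MULTICLASS
    ),
)
# The underlying proto records which oneof member is populated.
assert (
    automl_v1beta1.Dataset.pb(dataset).WhichOneof("dataset_metadata")
    == "text_classification_dataset_metadata"
)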
etag (str): diff --git a/google/cloud/automl_v1beta1/types/detection.py b/google/cloud/automl_v1beta1/types/detection.py index c14a1cb5..849b6c7b 100644 --- a/google/cloud/automl_v1beta1/types/detection.py +++ b/google/cloud/automl_v1beta1/types/detection.py @@ -38,7 +38,7 @@ class ImageObjectDetectionAnnotation(proto.Message): r"""Annotation details for image object detection. Attributes: - bounding_box (~.geometry.BoundingPoly): + bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly): Output only. The rectangle representing the object location. score (float): @@ -67,11 +67,11 @@ class VideoObjectTrackingAnnotation(proto.Message): effort. Especially in cases when an entity goes off-screen for a longer time (minutes), when it comes back it may be given a new instance ID. - time_offset (~.duration.Duration): + time_offset (google.protobuf.duration_pb2.Duration): Required. A time (frame) of a video to which this annotation pertains. Represented as the duration since the video's start. - bounding_box (~.geometry.BoundingPoly): + bounding_box (google.cloud.automl_v1beta1.types.BoundingPoly): Required. The rectangle representing the object location on the frame (i.e. at the time_offset of the video). score (float): @@ -105,7 +105,7 @@ class BoundingBoxMetricsEntry(proto.Message): mean_average_precision (float): Output only. The mean average precision, most often close to au_prc. - confidence_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): + confidence_metrics_entries (Sequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry.ConfidenceMetricsEntry]): Output only. Metrics for each label-match confidence_threshold from 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall @@ -156,7 +156,7 @@ class ImageObjectDetectionEvaluationMetrics(proto.Message): Output only. The total number of bounding boxes (i.e. summed over all images) the ground truth used to create this evaluation had. - bounding_box_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry]): + bounding_box_metrics_entries (Sequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry]): Output only. The bounding boxes match metrics for each Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each @@ -191,7 +191,7 @@ class VideoObjectTrackingEvaluationMetrics(proto.Message): Output only. The total number of bounding boxes (i.e. summed over all frames) the ground truth used to create this evaluation had. - bounding_box_metrics_entries (Sequence[~.detection.BoundingBoxMetricsEntry]): + bounding_box_metrics_entries (Sequence[google.cloud.automl_v1beta1.types.BoundingBoxMetricsEntry]): Output only. The bounding boxes match metrics for each Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each diff --git a/google/cloud/automl_v1beta1/types/geometry.py b/google/cloud/automl_v1beta1/types/geometry.py index f64a477f..004b34bb 100644 --- a/google/cloud/automl_v1beta1/types/geometry.py +++ b/google/cloud/automl_v1beta1/types/geometry.py @@ -50,7 +50,7 @@ class BoundingPoly(proto.Message): by connecting vertices in the order they are listed. Attributes: - normalized_vertices (Sequence[~.geometry.NormalizedVertex]): + normalized_vertices (Sequence[google.cloud.automl_v1beta1.types.NormalizedVertex]): Output only . The bounding polygon normalized vertices. 
""" diff --git a/google/cloud/automl_v1beta1/types/image.py b/google/cloud/automl_v1beta1/types/image.py index 636fa469..f34e6fba 100644 --- a/google/cloud/automl_v1beta1/types/image.py +++ b/google/cloud/automl_v1beta1/types/image.py @@ -38,7 +38,7 @@ class ImageClassificationDatasetMetadata(proto.Message): r"""Dataset metadata that is specific to image classification. Attributes: - classification_type (~.classification.ClassificationType): + classification_type (google.cloud.automl_v1beta1.types.ClassificationType): Required. Type of the classification problem. """ diff --git a/google/cloud/automl_v1beta1/types/io.py b/google/cloud/automl_v1beta1/types/io.py index 5be23eb2..1aeb140d 100644 --- a/google/cloud/automl_v1beta1/types/io.py +++ b/google/cloud/automl_v1beta1/types/io.py @@ -338,13 +338,13 @@ class InputConfig(proto.Message): is listed in Operation.metadata.partial_failures. Attributes: - gcs_source (~.io.GcsSource): + gcs_source (google.cloud.automl_v1beta1.types.GcsSource): The Google Cloud Storage location for the input content. In ImportData, the gcs_source points to a csv with structure described in the comment. - bigquery_source (~.io.BigQuerySource): + bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource): The BigQuery location for the input content. - params (Sequence[~.io.InputConfig.ParamsEntry]): + params (Sequence[google.cloud.automl_v1beta1.types.InputConfig.ParamsEntry]): Additional domain-specific parameters describing the semantic of the imported data, any string must be up to 25000 characters long. @@ -521,10 +521,10 @@ class BatchPredictInputConfig(proto.Message): count cap, will be listed in Operation.metadata.partial_failures. Attributes: - gcs_source (~.io.GcsSource): + gcs_source (google.cloud.automl_v1beta1.types.GcsSource): The Google Cloud Storage location for the input content. - bigquery_source (~.io.BigQuerySource): + bigquery_source (google.cloud.automl_v1beta1.types.BigQuerySource): The BigQuery location for the input content. """ @@ -542,7 +542,7 @@ class DocumentInputConfig(proto.Message): [Document][google.cloud.automl.v1beta1.Document]. Attributes: - gcs_source (~.io.GcsSource): + gcs_source (google.cloud.automl_v1beta1.types.GcsSource): The Google Cloud Storage location of the document file. Only a single path should be given. Max supported size: 512MB. @@ -580,14 +580,14 @@ class OutputConfig(proto.Message): filled with precisely the same data as this obtained on import. Attributes: - gcs_destination (~.io.GcsDestination): + gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): The Google Cloud Storage location where the output is to be written to. For Image Object Detection, Text Extraction, Video Classification and Tables, in the given directory a new directory will be created with name: export_data-- where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export output will be written into that directory. - bigquery_destination (~.io.BigQueryDestination): + bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): The BigQuery location where the output is to be written to. """ @@ -890,10 +890,10 @@ class BatchPredictOutputConfig(proto.Message): ``message``. Attributes: - gcs_destination (~.io.GcsDestination): + gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): The Google Cloud Storage location of the directory where the output is to be written to. 
- bigquery_destination (~.io.BigQueryDestination): + bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): The BigQuery location where the output is to be written to. """ @@ -911,7 +911,7 @@ class ModelExportOutputConfig(proto.Message): r"""Output configuration for ModelExport Action. Attributes: - gcs_destination (~.io.GcsDestination): + gcs_destination (google.cloud.automl_v1beta1.types.GcsDestination): The Google Cloud Storage location where the model is to be written to. This location may only be set for the following model formats: "tflite", "edgetpu_tflite", "tf_saved_model", @@ -922,7 +922,7 @@ class ModelExportOutputConfig(proto.Message): YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format, will be created. Inside the model and any of its supporting files will be written. - gcr_destination (~.io.GcrDestination): + gcr_destination (google.cloud.automl_v1beta1.types.GcrDestination): The GCR location where model image is to be pushed to. This location may only be set for the following model formats: "docker". @@ -986,7 +986,7 @@ class ModelExportOutputConfig(proto.Message): //cloud.google.com/vision/automl/docs/containers-gcs-quickstart) - core_ml - Used for iOS mobile devices. - params (Sequence[~.io.ModelExportOutputConfig.ParamsEntry]): + params (Sequence[google.cloud.automl_v1beta1.types.ModelExportOutputConfig.ParamsEntry]): Additional model-type and format specific parameters describing the requirements for the to be exported model files, any string must be up to 25000 characters long. @@ -1040,7 +1040,7 @@ class ExportEvaluatedExamplesOutputConfig(proto.Message): [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. Attributes: - bigquery_destination (~.io.BigQueryDestination): + bigquery_destination (google.cloud.automl_v1beta1.types.BigQueryDestination): The BigQuery location where the output is to be written to. """ diff --git a/google/cloud/automl_v1beta1/types/model.py b/google/cloud/automl_v1beta1/types/model.py index 2b22cc72..a4df8750 100644 --- a/google/cloud/automl_v1beta1/types/model.py +++ b/google/cloud/automl_v1beta1/types/model.py @@ -33,23 +33,23 @@ class Model(proto.Message): r"""API proto representing a trained machine learning model. Attributes: - translation_model_metadata (~.translation.TranslationModelMetadata): + translation_model_metadata (google.cloud.automl_v1beta1.types.TranslationModelMetadata): Metadata for translation models. - image_classification_model_metadata (~.image.ImageClassificationModelMetadata): + image_classification_model_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelMetadata): Metadata for image classification models. - text_classification_model_metadata (~.text.TextClassificationModelMetadata): + text_classification_model_metadata (google.cloud.automl_v1beta1.types.TextClassificationModelMetadata): Metadata for text classification models. - image_object_detection_model_metadata (~.image.ImageObjectDetectionModelMetadata): + image_object_detection_model_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelMetadata): Metadata for image object detection models. - video_classification_model_metadata (~.video.VideoClassificationModelMetadata): + video_classification_model_metadata (google.cloud.automl_v1beta1.types.VideoClassificationModelMetadata): Metadata for video classification models. 
- video_object_tracking_model_metadata (~.video.VideoObjectTrackingModelMetadata): + video_object_tracking_model_metadata (google.cloud.automl_v1beta1.types.VideoObjectTrackingModelMetadata): Metadata for video object tracking models. - text_extraction_model_metadata (~.text.TextExtractionModelMetadata): + text_extraction_model_metadata (google.cloud.automl_v1beta1.types.TextExtractionModelMetadata): Metadata for text extraction models. - tables_model_metadata (~.tables.TablesModelMetadata): + tables_model_metadata (google.cloud.automl_v1beta1.types.TablesModelMetadata): Metadata for Tables models. - text_sentiment_model_metadata (~.text.TextSentimentModelMetadata): + text_sentiment_model_metadata (google.cloud.automl_v1beta1.types.TextSentimentModelMetadata): Metadata for text sentiment models. name (str): Output only. Resource name of the model. Format: @@ -63,14 +63,14 @@ class Model(proto.Message): Required. The resource ID of the dataset used to create the model. The dataset must come from the same ancestor project and location. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when the model training finished and can be used for prediction. - update_time (~.timestamp.Timestamp): + update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this model was last updated. - deployment_state (~.model.Model.DeploymentState): + deployment_state (google.cloud.automl_v1beta1.types.Model.DeploymentState): Output only. Deployment state of the model. A model can only serve prediction requests after it gets deployed. diff --git a/google/cloud/automl_v1beta1/types/model_evaluation.py b/google/cloud/automl_v1beta1/types/model_evaluation.py index 2027bb8a..25ff816f 100644 --- a/google/cloud/automl_v1beta1/types/model_evaluation.py +++ b/google/cloud/automl_v1beta1/types/model_evaluation.py @@ -36,27 +36,27 @@ class ModelEvaluation(proto.Message): r"""Evaluation results of a model. Attributes: - classification_evaluation_metrics (~.classification.ClassificationEvaluationMetrics): + classification_evaluation_metrics (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics): Model evaluation metrics for image, text, video and tables classification. Tables problem is considered a classification when the target column is CATEGORY DataType. - regression_evaluation_metrics (~.regression.RegressionEvaluationMetrics): + regression_evaluation_metrics (google.cloud.automl_v1beta1.types.RegressionEvaluationMetrics): Model evaluation metrics for Tables regression. Tables problem is considered a regression when the target column has FLOAT64 DataType. - translation_evaluation_metrics (~.translation.TranslationEvaluationMetrics): + translation_evaluation_metrics (google.cloud.automl_v1beta1.types.TranslationEvaluationMetrics): Model evaluation metrics for translation. - image_object_detection_evaluation_metrics (~.detection.ImageObjectDetectionEvaluationMetrics): + image_object_detection_evaluation_metrics (google.cloud.automl_v1beta1.types.ImageObjectDetectionEvaluationMetrics): Model evaluation metrics for image object detection. - video_object_tracking_evaluation_metrics (~.detection.VideoObjectTrackingEvaluationMetrics): + video_object_tracking_evaluation_metrics (google.cloud.automl_v1beta1.types.VideoObjectTrackingEvaluationMetrics): Model evaluation metrics for video object tracking. 
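A minimal sketch of listing the ``ModelEvaluation`` resources described above for an existing model; the resource name is a placeholder:

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
model_name = "projects/PROJECT_ID/locations/us-central1/models/MODEL_ID"

for evaluation in client.list_model_evaluations(parent=model_name):
    # Only the *_evaluation_metrics field matching the model's problem type
    # is populated on each evaluation.
    print(evaluation.name, evaluation.evaluated_example_count)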
- text_sentiment_evaluation_metrics (~.text_sentiment.TextSentimentEvaluationMetrics): + text_sentiment_evaluation_metrics (google.cloud.automl_v1beta1.types.TextSentimentEvaluationMetrics): Evaluation metrics for text sentiment models. - text_extraction_evaluation_metrics (~.text_extraction.TextExtractionEvaluationMetrics): + text_extraction_evaluation_metrics (google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics): Evaluation metrics for text extraction models. name (str): @@ -87,7 +87,7 @@ class ModelEvaluation(proto.Message): distinct values of the target column at the moment of the model evaluation are populated here. The display_name is empty for the overall model evaluation. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Timestamp when this model evaluation was created. evaluated_example_count (int): diff --git a/google/cloud/automl_v1beta1/types/operations.py b/google/cloud/automl_v1beta1/types/operations.py index 62d893fd..e0b3204b 100644 --- a/google/cloud/automl_v1beta1/types/operations.py +++ b/google/cloud/automl_v1beta1/types/operations.py @@ -45,37 +45,37 @@ class OperationMetadata(proto.Message): AutoML API. Attributes: - delete_details (~.operations.DeleteOperationMetadata): + delete_details (google.cloud.automl_v1beta1.types.DeleteOperationMetadata): Details of a Delete operation. - deploy_model_details (~.operations.DeployModelOperationMetadata): + deploy_model_details (google.cloud.automl_v1beta1.types.DeployModelOperationMetadata): Details of a DeployModel operation. - undeploy_model_details (~.operations.UndeployModelOperationMetadata): + undeploy_model_details (google.cloud.automl_v1beta1.types.UndeployModelOperationMetadata): Details of an UndeployModel operation. - create_model_details (~.operations.CreateModelOperationMetadata): + create_model_details (google.cloud.automl_v1beta1.types.CreateModelOperationMetadata): Details of CreateModel operation. - import_data_details (~.operations.ImportDataOperationMetadata): + import_data_details (google.cloud.automl_v1beta1.types.ImportDataOperationMetadata): Details of ImportData operation. - batch_predict_details (~.operations.BatchPredictOperationMetadata): + batch_predict_details (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata): Details of BatchPredict operation. - export_data_details (~.operations.ExportDataOperationMetadata): + export_data_details (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata): Details of ExportData operation. - export_model_details (~.operations.ExportModelOperationMetadata): + export_model_details (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata): Details of ExportModel operation. - export_evaluated_examples_details (~.operations.ExportEvaluatedExamplesOperationMetadata): + export_evaluated_examples_details (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata): Details of ExportEvaluatedExamples operation. progress_percent (int): Output only. Progress of operation. Range: [0, 100]. Not used currently. - partial_failures (Sequence[~.status.Status]): + partial_failures (Sequence[google.rpc.status_pb2.Status]): Output only. Partial failures encountered. E.g. single files that couldn't be read. This field should never exceed 20 entries. Status details field will contain standard GCP error details. - create_time (~.timestamp.Timestamp): + create_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the operation was created. 
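For the ``OperationMetadata`` fields documented above, a brief sketch of reading them off a long-running operation returned by the client; the dataset name is a placeholder:

from google.cloud import automl_v1beta1

client = automl_v1beta1.AutoMlClient()
operation = client.delete_dataset(
    name="projects/PROJECT_ID/locations/us-central1/datasets/DATASET_ID"
)

# metadata may be None until the service has populated it.
metadata = operation.metadata  # google.cloud.automl_v1beta1.types.OperationMetadata
print(metadata.progress_percent, metadata.create_time)
for status in metadata.partial_failures:
    print("partial failure:", status.code, status.message)

operation.result()  # wait for the delete to complete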
- update_time (~.timestamp.Timestamp): + update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. Time when the operation was updated for the last time. """ @@ -175,7 +175,7 @@ class ExportDataOperationMetadata(proto.Message): r"""Details of ExportData operation. Attributes: - output_info (~.operations.ExportDataOperationMetadata.ExportDataOutputInfo): + output_info (google.cloud.automl_v1beta1.types.ExportDataOperationMetadata.ExportDataOutputInfo): Output only. Information further describing this export data's output. """ @@ -210,10 +210,10 @@ class BatchPredictOperationMetadata(proto.Message): r"""Details of BatchPredict operation. Attributes: - input_config (~.io.BatchPredictInputConfig): + input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): Output only. The input config that was given upon starting this batch predict operation. - output_info (~.operations.BatchPredictOperationMetadata.BatchPredictOutputInfo): + output_info (google.cloud.automl_v1beta1.types.BatchPredictOperationMetadata.BatchPredictOutputInfo): Output only. Information further describing this batch predict's output. """ @@ -253,7 +253,7 @@ class ExportModelOperationMetadata(proto.Message): r"""Details of ExportModel operation. Attributes: - output_info (~.operations.ExportModelOperationMetadata.ExportModelOutputInfo): + output_info (google.cloud.automl_v1beta1.types.ExportModelOperationMetadata.ExportModelOutputInfo): Output only. Information further describing the output of this model export. """ @@ -279,7 +279,7 @@ class ExportEvaluatedExamplesOperationMetadata(proto.Message): r"""Details of EvaluatedExamples operation. Attributes: - output_info (~.operations.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo): + output_info (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOperationMetadata.ExportEvaluatedExamplesOutputInfo): Output only. Information further describing the output of this evaluated examples export. """ diff --git a/google/cloud/automl_v1beta1/types/prediction_service.py b/google/cloud/automl_v1beta1/types/prediction_service.py index 4ea8fb68..1261d5f6 100644 --- a/google/cloud/automl_v1beta1/types/prediction_service.py +++ b/google/cloud/automl_v1beta1/types/prediction_service.py @@ -42,11 +42,11 @@ class PredictRequest(proto.Message): name (str): Required. Name of the model requested to serve the prediction. - payload (~.data_items.ExamplePayload): + payload (google.cloud.automl_v1beta1.types.ExamplePayload): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. - params (Sequence[~.prediction_service.PredictRequest.ParamsEntry]): + params (Sequence[google.cloud.automl_v1beta1.types.PredictRequest.ParamsEntry]): Additional domain-specific parameters, any string must be up to 25000 characters long. @@ -83,11 +83,11 @@ class PredictResponse(proto.Message): [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict]. Attributes: - payload (Sequence[~.annotation_payload.AnnotationPayload]): + payload (Sequence[google.cloud.automl_v1beta1.types.AnnotationPayload]): Prediction result. Translation and Text Sentiment will return precisely one payload. - preprocessed_input (~.data_items.ExamplePayload): + preprocessed_input (google.cloud.automl_v1beta1.types.ExamplePayload): The preprocessed example that AutoML actually makes prediction on. Empty if AutoML does not preprocess the input example. 
@@ -95,7 +95,7 @@ class PredictResponse(proto.Message): - For Text Extraction: If the input is a .pdf file, the OCR'ed text will be provided in [document_text][google.cloud.automl.v1beta1.Document.document_text]. - metadata (Sequence[~.prediction_service.PredictResponse.MetadataEntry]): + metadata (Sequence[google.cloud.automl_v1beta1.types.PredictResponse.MetadataEntry]): Additional domain-specific prediction response metadata. - For Image Object Detection: ``max_bounding_box_count`` - @@ -133,13 +133,13 @@ class BatchPredictRequest(proto.Message): name (str): Required. Name of the model requested to serve the batch prediction. - input_config (~.io.BatchPredictInputConfig): + input_config (google.cloud.automl_v1beta1.types.BatchPredictInputConfig): Required. The input configuration for batch prediction. - output_config (~.io.BatchPredictOutputConfig): + output_config (google.cloud.automl_v1beta1.types.BatchPredictOutputConfig): Required. The Configuration specifying where output predictions should be written. - params (Sequence[~.prediction_service.BatchPredictRequest.ParamsEntry]): + params (Sequence[google.cloud.automl_v1beta1.types.BatchPredictRequest.ParamsEntry]): Required. Additional domain-specific parameters for the predictions, any string must be up to 25000 characters long. @@ -239,7 +239,7 @@ class BatchPredictResult(proto.Message): [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict]. Attributes: - metadata (Sequence[~.prediction_service.BatchPredictResult.MetadataEntry]): + metadata (Sequence[google.cloud.automl_v1beta1.types.BatchPredictResult.MetadataEntry]): Additional domain-specific prediction response metadata. - For Image Object Detection: ``max_bounding_box_count`` - diff --git a/google/cloud/automl_v1beta1/types/service.py b/google/cloud/automl_v1beta1/types/service.py index cfe23a6f..8e732af2 100644 --- a/google/cloud/automl_v1beta1/types/service.py +++ b/google/cloud/automl_v1beta1/types/service.py @@ -72,7 +72,7 @@ class CreateDatasetRequest(proto.Message): parent (str): Required. The resource name of the project to create the dataset for. - dataset (~.gca_dataset.Dataset): + dataset (google.cloud.automl_v1beta1.types.Dataset): Required. The dataset to create. """ @@ -138,7 +138,7 @@ class ListDatasetsResponse(proto.Message): [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets]. Attributes: - datasets (Sequence[~.gca_dataset.Dataset]): + datasets (Sequence[google.cloud.automl_v1beta1.types.Dataset]): The datasets read. next_page_token (str): A token to retrieve next page of results. Pass to @@ -162,10 +162,10 @@ class UpdateDatasetRequest(proto.Message): [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset] Attributes: - dataset (~.gca_dataset.Dataset): + dataset (google.cloud.automl_v1beta1.types.Dataset): Required. The dataset which replaces the resource on the server. - update_mask (~.gp_field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): The update mask applies to the resource. """ @@ -196,7 +196,7 @@ class ImportDataRequest(proto.Message): Required. Dataset name. Dataset must already exist. All imported annotations and examples will be added. - input_config (~.io.InputConfig): + input_config (google.cloud.automl_v1beta1.types.InputConfig): Required. The desired input location and its domain specific semantics, if any. """ @@ -213,7 +213,7 @@ class ExportDataRequest(proto.Message): Attributes: name (str): Required. The resource name of the dataset. 
- output_config (~.io.OutputConfig): + output_config (google.cloud.automl_v1beta1.types.OutputConfig): Required. The desired output location. """ @@ -243,7 +243,7 @@ class GetTableSpecRequest(proto.Message): name (str): Required. The resource name of the table spec to retrieve. - field_mask (~.gp_field_mask.FieldMask): + field_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ @@ -260,7 +260,7 @@ class ListTableSpecsRequest(proto.Message): parent (str): Required. The resource name of the dataset to list table specs from. - field_mask (~.gp_field_mask.FieldMask): + field_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. filter (str): Filter expression, see go/filtering. @@ -293,7 +293,7 @@ class ListTableSpecsResponse(proto.Message): [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs]. Attributes: - table_specs (Sequence[~.gca_table_spec.TableSpec]): + table_specs (Sequence[google.cloud.automl_v1beta1.types.TableSpec]): The table specs read. next_page_token (str): A token to retrieve next page of results. Pass to @@ -317,10 +317,10 @@ class UpdateTableSpecRequest(proto.Message): [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec] Attributes: - table_spec (~.gca_table_spec.TableSpec): + table_spec (google.cloud.automl_v1beta1.types.TableSpec): Required. The table spec which replaces the resource on the server. - update_mask (~.gp_field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): The update mask applies to the resource. """ @@ -337,7 +337,7 @@ class GetColumnSpecRequest(proto.Message): name (str): Required. The resource name of the column spec to retrieve. - field_mask (~.gp_field_mask.FieldMask): + field_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. """ @@ -354,7 +354,7 @@ class ListColumnSpecsRequest(proto.Message): parent (str): Required. The resource name of the table spec to list column specs from. - field_mask (~.gp_field_mask.FieldMask): + field_mask (google.protobuf.field_mask_pb2.FieldMask): Mask specifying which fields to read. filter (str): Filter expression, see go/filtering. @@ -387,7 +387,7 @@ class ListColumnSpecsResponse(proto.Message): [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs]. Attributes: - column_specs (Sequence[~.gca_column_spec.ColumnSpec]): + column_specs (Sequence[google.cloud.automl_v1beta1.types.ColumnSpec]): The column specs read. next_page_token (str): A token to retrieve next page of results. Pass to @@ -411,10 +411,10 @@ class UpdateColumnSpecRequest(proto.Message): [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec] Attributes: - column_spec (~.gca_column_spec.ColumnSpec): + column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): Required. The column spec which replaces the resource on the server. - update_mask (~.gp_field_mask.FieldMask): + update_mask (google.protobuf.field_mask_pb2.FieldMask): The update mask applies to the resource. """ @@ -433,7 +433,7 @@ class CreateModelRequest(proto.Message): parent (str): Required. Resource name of the parent project where the model is being created. - model (~.gca_model.Model): + model (google.cloud.automl_v1beta1.types.Model): Required. The model to create. """ @@ -501,7 +501,7 @@ class ListModelsResponse(proto.Message): [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels]. 
Attributes: - model (Sequence[~.gca_model.Model]): + model (Sequence[google.cloud.automl_v1beta1.types.Model]): List of models in the requested page. next_page_token (str): A token to retrieve next page of results. Pass to @@ -536,10 +536,10 @@ class DeployModelRequest(proto.Message): [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel]. Attributes: - image_object_detection_model_deployment_metadata (~.image.ImageObjectDetectionModelDeploymentMetadata): + image_object_detection_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageObjectDetectionModelDeploymentMetadata): Model deployment metadata specific to Image Object Detection. - image_classification_model_deployment_metadata (~.image.ImageClassificationModelDeploymentMetadata): + image_classification_model_deployment_metadata (google.cloud.automl_v1beta1.types.ImageClassificationModelDeploymentMetadata): Model deployment metadata specific to Image Classification. name (str): @@ -587,7 +587,7 @@ class ExportModelRequest(proto.Message): name (str): Required. The resource name of the model to export. - output_config (~.io.ModelExportOutputConfig): + output_config (google.cloud.automl_v1beta1.types.ModelExportOutputConfig): Required. The desired output location and configuration. """ @@ -607,7 +607,7 @@ class ExportEvaluatedExamplesRequest(proto.Message): name (str): Required. The resource name of the model whose evaluated examples are to be exported. - output_config (~.io.ExportEvaluatedExamplesOutputConfig): + output_config (google.cloud.automl_v1beta1.types.ExportEvaluatedExamplesOutputConfig): Required. The desired output location and configuration. """ @@ -679,7 +679,7 @@ class ListModelEvaluationsResponse(proto.Message): [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations]. Attributes: - model_evaluation (Sequence[~.gca_model_evaluation.ModelEvaluation]): + model_evaluation (Sequence[google.cloud.automl_v1beta1.types.ModelEvaluation]): List of model evaluations in the requested page. next_page_token (str): diff --git a/google/cloud/automl_v1beta1/types/table_spec.py b/google/cloud/automl_v1beta1/types/table_spec.py index 8a9b7ce5..e69c24ce 100644 --- a/google/cloud/automl_v1beta1/types/table_spec.py +++ b/google/cloud/automl_v1beta1/types/table_spec.py @@ -63,7 +63,7 @@ class TableSpec(proto.Message): Output only. The number of columns of the table. That is, the number of child ColumnSpec-s. - input_configs (Sequence[~.io.InputConfig]): + input_configs (Sequence[google.cloud.automl_v1beta1.types.InputConfig]): Output only. Input configs via which data currently residing in the table had been imported. diff --git a/google/cloud/automl_v1beta1/types/tables.py b/google/cloud/automl_v1beta1/types/tables.py index affe5e37..50b82e0f 100644 --- a/google/cloud/automl_v1beta1/types/tables.py +++ b/google/cloud/automl_v1beta1/types/tables.py @@ -83,7 +83,7 @@ class TablesDatasetMetadata(proto.Message): as ``UNASSIGNED``. NOTE: Updates of this field will instantly affect any other users concurrently working with the dataset. - target_column_correlations (Sequence[~.tables.TablesDatasetMetadata.TargetColumnCorrelationsEntry]): + target_column_correlations (Sequence[google.cloud.automl_v1beta1.types.TablesDatasetMetadata.TargetColumnCorrelationsEntry]): Output only. Correlations between [TablesDatasetMetadata.target_column_spec_id][google.cloud.automl.v1beta1.TablesDatasetMetadata.target_column_spec_id], @@ -95,7 +95,7 @@ class TablesDatasetMetadata(proto.Message): column. 
This field may be stale, see the stats_update_time field for for the timestamp at which these stats were last updated. - stats_update_time (~.timestamp.Timestamp): + stats_update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The most recent timestamp when target_column_correlations field and all descendant ColumnSpec.data_stats and ColumnSpec.top_correlated_columns @@ -134,7 +134,7 @@ class TablesModelMetadata(proto.Message): Required when optimization_objective is "MAXIMIZE_RECALL_AT_PRECISION". Must be between 0 and 1, inclusive. - target_column_spec (~.column_spec.ColumnSpec): + target_column_spec (google.cloud.automl_v1beta1.types.ColumnSpec): Column spec of the dataset's primary table's column the model is predicting. Snapshotted when model creation started. Only 3 fields are used: name - May be set on @@ -143,7 +143,7 @@ class TablesModelMetadata(proto.Message): model is trained from is used. If neither is set, CreateModel will error. display_name - Output only. data_type - Output only. - input_feature_column_specs (Sequence[~.column_spec.ColumnSpec]): + input_feature_column_specs (Sequence[google.cloud.automl_v1beta1.types.ColumnSpec]): Column specs of the dataset's primary table's columns, on which the model is trained and which are used as the input for predictions. The @@ -193,7 +193,7 @@ class TablesModelMetadata(proto.Message): root-mean-squared error (RMSE). "MINIMIZE_MAE" - Minimize mean-absolute error (MAE). "MINIMIZE_RMSLE" - Minimize root-mean-squared log error (RMSLE). - tables_model_column_info (Sequence[~.tables.TablesModelColumnInfo]): + tables_model_column_info (Sequence[google.cloud.automl_v1beta1.types.TablesModelColumnInfo]): Output only. Auxiliary information for each of the input_feature_column_specs with respect to this particular model. @@ -268,13 +268,13 @@ class TablesAnnotation(proto.Message): [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] of FLOAT64 data type the score is not populated. - prediction_interval (~.ranges.DoubleRange): + prediction_interval (google.cloud.automl_v1beta1.types.DoubleRange): Output only. Only populated when [target_column_spec][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec] has FLOAT64 data type. An interval in which the exactly correct target value has 95% chance to be in. - value (~.struct.Value): + value (google.protobuf.struct_pb2.Value): The predicted value of the row's [target_column][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]. @@ -285,7 +285,7 @@ class TablesAnnotation(proto.Message): - FLOAT64 - the predicted (with above ``prediction_interval``) FLOAT64 value. - tables_model_column_info (Sequence[~.tables.TablesModelColumnInfo]): + tables_model_column_info (Sequence[google.cloud.automl_v1beta1.types.TablesModelColumnInfo]): Output only. Auxiliary information for each of the model's [input_feature_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs] diff --git a/google/cloud/automl_v1beta1/types/temporal.py b/google/cloud/automl_v1beta1/types/temporal.py index 442ff4b5..6334e502 100644 --- a/google/cloud/automl_v1beta1/types/temporal.py +++ b/google/cloud/automl_v1beta1/types/temporal.py @@ -31,11 +31,11 @@ class TimeSegment(proto.Message): (e.g. video). Attributes: - start_time_offset (~.duration.Duration): + start_time_offset (google.protobuf.duration_pb2.Duration): Start of the time segment (inclusive), represented as the duration since the example start. 
- end_time_offset (~.duration.Duration): + end_time_offset (google.protobuf.duration_pb2.Duration): End of the time segment (exclusive), represented as the duration since the example start. diff --git a/google/cloud/automl_v1beta1/types/text.py b/google/cloud/automl_v1beta1/types/text.py index bc2b888c..83c01a1b 100644 --- a/google/cloud/automl_v1beta1/types/text.py +++ b/google/cloud/automl_v1beta1/types/text.py @@ -38,7 +38,7 @@ class TextClassificationDatasetMetadata(proto.Message): r"""Dataset metadata for classification. Attributes: - classification_type (~.classification.ClassificationType): + classification_type (google.cloud.automl_v1beta1.types.ClassificationType): Required. Type of the classification problem. """ @@ -51,7 +51,7 @@ class TextClassificationModelMetadata(proto.Message): r"""Model metadata that is specific to text classification. Attributes: - classification_type (~.classification.ClassificationType): + classification_type (google.cloud.automl_v1beta1.types.ClassificationType): Output only. Classification type of the dataset used to train this model. """ diff --git a/google/cloud/automl_v1beta1/types/text_extraction.py b/google/cloud/automl_v1beta1/types/text_extraction.py index 4193fc0e..13fd60f2 100644 --- a/google/cloud/automl_v1beta1/types/text_extraction.py +++ b/google/cloud/automl_v1beta1/types/text_extraction.py @@ -31,7 +31,7 @@ class TextExtractionAnnotation(proto.Message): r"""Annotation for identifying spans of text. Attributes: - text_segment (~.gca_text_segment.TextSegment): + text_segment (google.cloud.automl_v1beta1.types.TextSegment): An entity annotation will set this, which is the part of the original text to which the annotation pertains. @@ -58,7 +58,7 @@ class TextExtractionEvaluationMetrics(proto.Message): au_prc (float): Output only. The Area under precision recall curve metric. - confidence_metrics_entries (Sequence[~.text_extraction.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): + confidence_metrics_entries (Sequence[google.cloud.automl_v1beta1.types.TextExtractionEvaluationMetrics.ConfidenceMetricsEntry]): Output only. Metrics that have confidence thresholds. Precision-recall curve can be derived from it. diff --git a/google/cloud/automl_v1beta1/types/text_sentiment.py b/google/cloud/automl_v1beta1/types/text_sentiment.py index da055e23..16206eb1 100644 --- a/google/cloud/automl_v1beta1/types/text_sentiment.py +++ b/google/cloud/automl_v1beta1/types/text_sentiment.py @@ -80,7 +80,7 @@ class TextSentimentEvaluationMetrics(proto.Message): Output only. Quadratic weighted kappa. Only set for the overall model evaluation, not for evaluation of a single annotation spec. - confusion_matrix (~.classification.ClassificationEvaluationMetrics.ConfusionMatrix): + confusion_matrix (google.cloud.automl_v1beta1.types.ClassificationEvaluationMetrics.ConfusionMatrix): Output only. Confusion matrix of the evaluation. Only set for the overall model evaluation, not for evaluation of a single diff --git a/google/cloud/automl_v1beta1/types/translation.py b/google/cloud/automl_v1beta1/types/translation.py index 9c7491e0..34ba6f24 100644 --- a/google/cloud/automl_v1beta1/types/translation.py +++ b/google/cloud/automl_v1beta1/types/translation.py @@ -93,7 +93,7 @@ class TranslationAnnotation(proto.Message): r"""Annotation details specific to translation. Attributes: - translated_content (~.data_items.TextSnippet): + translated_content (google.cloud.automl_v1beta1.types.TextSnippet): Output only . The translated content. 
""" diff --git a/noxfile.py b/noxfile.py index f3748952..c9f48fb6 100644 --- a/noxfile.py +++ b/noxfile.py @@ -18,6 +18,7 @@ from __future__ import absolute_import import os +import pathlib import shutil import nox @@ -30,6 +31,22 @@ SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +# 'docfx' is excluded since it only needs to run in 'docs-presubmit' +nox.options.sessions = [ + "unit", + "system", + "cover", + "lint", + "lint_setup_py", + "blacken", + "docs", +] + +# Error if a python version is missing +nox.options.error_on_missing_interpreters = True + @nox.session(python=DEFAULT_PYTHON_VERSION) def lint(session): @@ -70,17 +87,21 @@ def lint_setup_py(session): def default(session): # Install all test dependencies, then install this package in-place. - session.install("asyncmock", "pytest-asyncio") - session.install( - "mock", "pytest", "pytest-cov", + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" ) - session.install("-e", ".[pandas,storage]") + session.install("asyncmock", "pytest-asyncio", "-c", constraints_path) + + session.install("mock", "pytest", "pytest-cov", "-c", constraints_path) + + session.install("-e", ".[pandas,storage]", "-c", constraints_path) # Run py.test against the unit tests. session.run( "py.test", "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", "--cov=google/cloud", "--cov=tests/unit", "--cov-append", @@ -101,6 +122,9 @@ def unit(session): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) def system(session): """Run the system test suite.""" + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") @@ -110,6 +134,9 @@ def system(session): # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") + # Install pyopenssl for mTLS testing. + if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true": + session.install("pyopenssl") system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) @@ -122,16 +149,26 @@ def system(session): # Install all test dependencies, then install this package into the # virtualenv's dist-packages. - session.install( - "mock", "pytest", "google-cloud-testutils", - ) - session.install("-e", ".[pandas,storage]") + session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path) + session.install("-e", ".[pandas,storage]", "-c", constraints_path) # Run py.test against the system tests. 
if system_test_exists: - session.run("py.test", "--quiet", system_test_path, *session.posargs) + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_path, + *session.posargs, + ) if system_test_folder_exists: - session.run("py.test", "--quiet", system_test_folder_path, *session.posargs) + session.run( + "py.test", + "--quiet", + f"--junitxml=system_{session.python}_sponge_log.xml", + system_test_folder_path, + *session.posargs, + ) @nox.session(python=DEFAULT_PYTHON_VERSION) @@ -173,9 +210,9 @@ def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".[pandas,storage]") - # sphinx-docfx-yaml supports up to sphinx version 1.5.5. - # https://github.com/docascode/sphinx-docfx-yaml/issues/97 - session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") + session.install( + "sphinx<3.0.0", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml" + ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) session.run( diff --git a/renovate.json b/renovate.json index 4fa94931..f08bc22c 100644 --- a/renovate.json +++ b/renovate.json @@ -1,5 +1,6 @@ { "extends": [ "config:base", ":preserveSemverRanges" - ] + ], + "ignorePaths": [".pre-commit-config.yaml"] } diff --git a/samples/beta/noxfile.py b/samples/beta/noxfile.py index bca0522e..97bf7da8 100644 --- a/samples/beta/noxfile.py +++ b/samples/beta/noxfile.py @@ -85,7 +85,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py index bbd25fcd..97bf7da8 100644 --- a/samples/snippets/noxfile.py +++ b/samples/snippets/noxfile.py @@ -38,25 +38,28 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - "ignored_versions": ["2.7"], + 'ignored_versions': ["2.7"], + # Old samples are opted out of enforcing Python type hints # All new samples should feature them - "enforce_type_hints": False, + 'enforce_type_hints': False, + # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", + 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', + # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - "envs": {}, + 'envs': {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append(".") + sys.path.append('.') from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -71,21 +74,21 @@ def get_pytest_env_vars() -> Dict[str, str]: ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG["gcloud_project_env"] + env_key = TEST_CONFIG['gcloud_project_env'] # This should error out if not set. - ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] + ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] # Apply user supplied envs. 
- ret.update(TEST_CONFIG["envs"]) + ret.update(TEST_CONFIG['envs']) return ret # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] +IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -134,7 +137,7 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: - if not TEST_CONFIG["enforce_type_hints"]: + if not TEST_CONFIG['enforce_type_hints']: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -143,11 +146,9 @@ def lint(session: nox.sessions.Session) -> None: args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - ".", + "." ] session.run("flake8", *args) - - # # Black # @@ -160,7 +161,6 @@ def blacken(session: nox.sessions.Session) -> None: session.run("black", *python_files) - # # Sample Tests # @@ -169,9 +169,7 @@ def blacken(session: nox.sessions.Session) -> None: PYTEST_COMMON_ARGS = ["--junitxml=sponge_log.xml"] -def _session_tests( - session: nox.sessions.Session, post_install: Callable = None -) -> None: +def _session_tests(session: nox.sessions.Session, post_install: Callable = None) -> None: """Runs py.test for a particular project.""" if os.path.exists("requirements.txt"): session.install("-r", "requirements.txt") @@ -202,9 +200,9 @@ def py(session: nox.sessions.Session) -> None: if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip( - "SKIPPED: {} tests are disabled for this sample.".format(session.python) - ) + session.skip("SKIPPED: {} tests are disabled for this sample.".format( + session.python + )) # diff --git a/samples/tables/noxfile.py b/samples/tables/noxfile.py index bca0522e..97bf7da8 100644 --- a/samples/tables/noxfile.py +++ b/samples/tables/noxfile.py @@ -85,7 +85,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to tested samples. -ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] +ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] diff --git a/setup.py b/setup.py index 6cc0bfad..243cfc2b 100644 --- a/setup.py +++ b/setup.py @@ -22,12 +22,12 @@ version = "2.2.0" release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.22.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.22.2, < 2.0.0dev", "proto-plus >= 1.10.0", - "libcst >= 0.2.5", ] extras = { - "pandas": ["pandas>=0.17.1"], + "libcst": "libcst >= 0.2.5", + "pandas": ["pandas>=0.23.0"], "storage": ["google-cloud-storage >= 1.18.0, < 2.0.0dev"], } diff --git a/synth.metadata b/synth.metadata index d937b106..dd41beb5 100644 --- a/synth.metadata +++ b/synth.metadata @@ -3,30 +3,30 @@ { "git": { "name": ".", - "remote": "https://github.com/googleapis/python-automl.git", - "sha": "356eb12708f8219644160f59775e4aae18b66f24" + "remote": "git@github.com:googleapis/python-automl", + "sha": "2ab99f33201890ae2ad036aaf3a662257c564abb" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "6dae98144d466d4f985b926baec6208b01572f55", - "internalRef": "347459563" + "sha": "56fc6d43fed71188d7e18f3ca003544646c4ab35", + "internalRef": "366346972" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "373861061648b5fe5e0ac4f8a38b32d639ee93e4" + "sha": "ff39353f34a36e7643b86e97724e4027ab466dc6" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "373861061648b5fe5e0ac4f8a38b32d639ee93e4" + "sha": "ff39353f34a36e7643b86e97724e4027ab466dc6" } } ], @@ -49,224 +49,5 @@ "generator": "bazel" } } - ], - "generatedFiles": [ - ".coveragerc", - ".flake8", - ".github/CONTRIBUTING.md", - ".github/ISSUE_TEMPLATE/bug_report.md", - ".github/ISSUE_TEMPLATE/feature_request.md", - ".github/ISSUE_TEMPLATE/support_request.md", - ".github/PULL_REQUEST_TEMPLATE.md", - ".github/release-please.yml", - ".github/snippet-bot.yml", - ".gitignore", - ".kokoro/build.sh", - ".kokoro/continuous/common.cfg", - ".kokoro/continuous/continuous.cfg", - ".kokoro/docker/docs/Dockerfile", - ".kokoro/docker/docs/fetch_gpg_keys.sh", - ".kokoro/docs/common.cfg", - ".kokoro/docs/docs-presubmit.cfg", - ".kokoro/docs/docs.cfg", - ".kokoro/populate-secrets.sh", - ".kokoro/presubmit/common.cfg", - ".kokoro/presubmit/presubmit.cfg", - ".kokoro/publish-docs.sh", - ".kokoro/release.sh", - ".kokoro/release/common.cfg", - ".kokoro/release/release.cfg", - ".kokoro/samples/lint/common.cfg", - ".kokoro/samples/lint/continuous.cfg", - ".kokoro/samples/lint/periodic.cfg", - ".kokoro/samples/lint/presubmit.cfg", - ".kokoro/samples/python3.6/common.cfg", - ".kokoro/samples/python3.6/continuous.cfg", - ".kokoro/samples/python3.6/periodic.cfg", - ".kokoro/samples/python3.6/presubmit.cfg", - ".kokoro/samples/python3.7/common.cfg", - ".kokoro/samples/python3.7/continuous.cfg", - ".kokoro/samples/python3.7/periodic.cfg", - ".kokoro/samples/python3.7/presubmit.cfg", - ".kokoro/samples/python3.8/common.cfg", - ".kokoro/samples/python3.8/continuous.cfg", - ".kokoro/samples/python3.8/periodic.cfg", - ".kokoro/samples/python3.8/presubmit.cfg", - ".kokoro/test-samples.sh", - ".kokoro/trampoline.sh", - ".kokoro/trampoline_v2.sh", - ".pre-commit-config.yaml", - ".trampolinerc", - "CODE_OF_CONDUCT.md", - "CONTRIBUTING.rst", - "LICENSE", - "MANIFEST.in", - "docs/_static/custom.css", - "docs/_templates/layout.html", - "docs/automl_v1/services.rst", - "docs/automl_v1/types.rst", - "docs/automl_v1beta1/services.rst", 
- "docs/automl_v1beta1/types.rst", - "docs/conf.py", - "docs/multiprocessing.rst", - "google/cloud/automl/__init__.py", - "google/cloud/automl/py.typed", - "google/cloud/automl_v1/__init__.py", - "google/cloud/automl_v1/proto/annotation_payload.proto", - "google/cloud/automl_v1/proto/annotation_spec.proto", - "google/cloud/automl_v1/proto/classification.proto", - "google/cloud/automl_v1/proto/data_items.proto", - "google/cloud/automl_v1/proto/dataset.proto", - "google/cloud/automl_v1/proto/detection.proto", - "google/cloud/automl_v1/proto/geometry.proto", - "google/cloud/automl_v1/proto/image.proto", - "google/cloud/automl_v1/proto/io.proto", - "google/cloud/automl_v1/proto/model.proto", - "google/cloud/automl_v1/proto/model_evaluation.proto", - "google/cloud/automl_v1/proto/operations.proto", - "google/cloud/automl_v1/proto/prediction_service.proto", - "google/cloud/automl_v1/proto/service.proto", - "google/cloud/automl_v1/proto/text.proto", - "google/cloud/automl_v1/proto/text_extraction.proto", - "google/cloud/automl_v1/proto/text_segment.proto", - "google/cloud/automl_v1/proto/text_sentiment.proto", - "google/cloud/automl_v1/proto/translation.proto", - "google/cloud/automl_v1/py.typed", - "google/cloud/automl_v1/services/__init__.py", - "google/cloud/automl_v1/services/auto_ml/__init__.py", - "google/cloud/automl_v1/services/auto_ml/async_client.py", - "google/cloud/automl_v1/services/auto_ml/client.py", - "google/cloud/automl_v1/services/auto_ml/pagers.py", - "google/cloud/automl_v1/services/auto_ml/transports/__init__.py", - "google/cloud/automl_v1/services/auto_ml/transports/base.py", - "google/cloud/automl_v1/services/auto_ml/transports/grpc.py", - "google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py", - "google/cloud/automl_v1/services/prediction_service/__init__.py", - "google/cloud/automl_v1/services/prediction_service/async_client.py", - "google/cloud/automl_v1/services/prediction_service/client.py", - "google/cloud/automl_v1/services/prediction_service/transports/__init__.py", - "google/cloud/automl_v1/services/prediction_service/transports/base.py", - "google/cloud/automl_v1/services/prediction_service/transports/grpc.py", - "google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py", - "google/cloud/automl_v1/types/__init__.py", - "google/cloud/automl_v1/types/annotation_payload.py", - "google/cloud/automl_v1/types/annotation_spec.py", - "google/cloud/automl_v1/types/classification.py", - "google/cloud/automl_v1/types/data_items.py", - "google/cloud/automl_v1/types/dataset.py", - "google/cloud/automl_v1/types/detection.py", - "google/cloud/automl_v1/types/geometry.py", - "google/cloud/automl_v1/types/image.py", - "google/cloud/automl_v1/types/io.py", - "google/cloud/automl_v1/types/model.py", - "google/cloud/automl_v1/types/model_evaluation.py", - "google/cloud/automl_v1/types/operations.py", - "google/cloud/automl_v1/types/prediction_service.py", - "google/cloud/automl_v1/types/service.py", - "google/cloud/automl_v1/types/text.py", - "google/cloud/automl_v1/types/text_extraction.py", - "google/cloud/automl_v1/types/text_segment.py", - "google/cloud/automl_v1/types/text_sentiment.py", - "google/cloud/automl_v1/types/translation.py", - "google/cloud/automl_v1beta1/__init__.py", - "google/cloud/automl_v1beta1/proto/annotation_payload.proto", - "google/cloud/automl_v1beta1/proto/annotation_spec.proto", - "google/cloud/automl_v1beta1/proto/classification.proto", - "google/cloud/automl_v1beta1/proto/column_spec.proto", - 
"google/cloud/automl_v1beta1/proto/data_items.proto", - "google/cloud/automl_v1beta1/proto/data_stats.proto", - "google/cloud/automl_v1beta1/proto/data_types.proto", - "google/cloud/automl_v1beta1/proto/dataset.proto", - "google/cloud/automl_v1beta1/proto/detection.proto", - "google/cloud/automl_v1beta1/proto/geometry.proto", - "google/cloud/automl_v1beta1/proto/image.proto", - "google/cloud/automl_v1beta1/proto/io.proto", - "google/cloud/automl_v1beta1/proto/model.proto", - "google/cloud/automl_v1beta1/proto/model_evaluation.proto", - "google/cloud/automl_v1beta1/proto/operations.proto", - "google/cloud/automl_v1beta1/proto/prediction_service.proto", - "google/cloud/automl_v1beta1/proto/ranges.proto", - "google/cloud/automl_v1beta1/proto/regression.proto", - "google/cloud/automl_v1beta1/proto/service.proto", - "google/cloud/automl_v1beta1/proto/table_spec.proto", - "google/cloud/automl_v1beta1/proto/tables.proto", - "google/cloud/automl_v1beta1/proto/temporal.proto", - "google/cloud/automl_v1beta1/proto/text.proto", - "google/cloud/automl_v1beta1/proto/text_extraction.proto", - "google/cloud/automl_v1beta1/proto/text_segment.proto", - "google/cloud/automl_v1beta1/proto/text_sentiment.proto", - "google/cloud/automl_v1beta1/proto/translation.proto", - "google/cloud/automl_v1beta1/proto/video.proto", - "google/cloud/automl_v1beta1/py.typed", - "google/cloud/automl_v1beta1/services/__init__.py", - "google/cloud/automl_v1beta1/services/auto_ml/__init__.py", - "google/cloud/automl_v1beta1/services/auto_ml/async_client.py", - "google/cloud/automl_v1beta1/services/auto_ml/client.py", - "google/cloud/automl_v1beta1/services/auto_ml/pagers.py", - "google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py", - "google/cloud/automl_v1beta1/services/auto_ml/transports/base.py", - "google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py", - "google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py", - "google/cloud/automl_v1beta1/services/prediction_service/__init__.py", - "google/cloud/automl_v1beta1/services/prediction_service/async_client.py", - "google/cloud/automl_v1beta1/services/prediction_service/client.py", - "google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py", - "google/cloud/automl_v1beta1/services/prediction_service/transports/base.py", - "google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py", - "google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py", - "google/cloud/automl_v1beta1/types/__init__.py", - "google/cloud/automl_v1beta1/types/annotation_payload.py", - "google/cloud/automl_v1beta1/types/annotation_spec.py", - "google/cloud/automl_v1beta1/types/classification.py", - "google/cloud/automl_v1beta1/types/column_spec.py", - "google/cloud/automl_v1beta1/types/data_items.py", - "google/cloud/automl_v1beta1/types/data_stats.py", - "google/cloud/automl_v1beta1/types/data_types.py", - "google/cloud/automl_v1beta1/types/dataset.py", - "google/cloud/automl_v1beta1/types/detection.py", - "google/cloud/automl_v1beta1/types/geometry.py", - "google/cloud/automl_v1beta1/types/image.py", - "google/cloud/automl_v1beta1/types/io.py", - "google/cloud/automl_v1beta1/types/model.py", - "google/cloud/automl_v1beta1/types/model_evaluation.py", - "google/cloud/automl_v1beta1/types/operations.py", - "google/cloud/automl_v1beta1/types/prediction_service.py", - "google/cloud/automl_v1beta1/types/ranges.py", - "google/cloud/automl_v1beta1/types/regression.py", - 
"google/cloud/automl_v1beta1/types/service.py", - "google/cloud/automl_v1beta1/types/table_spec.py", - "google/cloud/automl_v1beta1/types/tables.py", - "google/cloud/automl_v1beta1/types/temporal.py", - "google/cloud/automl_v1beta1/types/text.py", - "google/cloud/automl_v1beta1/types/text_extraction.py", - "google/cloud/automl_v1beta1/types/text_segment.py", - "google/cloud/automl_v1beta1/types/text_sentiment.py", - "google/cloud/automl_v1beta1/types/translation.py", - "google/cloud/automl_v1beta1/types/video.py", - "mypy.ini", - "noxfile.py", - "renovate.json", - "samples/AUTHORING_GUIDE.md", - "samples/CONTRIBUTING.md", - "samples/beta/noxfile.py", - "samples/snippets/noxfile.py", - "samples/tables/noxfile.py", - "scripts/decrypt-secrets.sh", - "scripts/fixup_automl_v1_keywords.py", - "scripts/fixup_automl_v1beta1_keywords.py", - "scripts/readme-gen/readme_gen.py", - "scripts/readme-gen/templates/README.tmpl.rst", - "scripts/readme-gen/templates/auth.tmpl.rst", - "scripts/readme-gen/templates/auth_api_key.tmpl.rst", - "scripts/readme-gen/templates/install_deps.tmpl.rst", - "scripts/readme-gen/templates/install_portaudio.tmpl.rst", - "setup.cfg", - "testing/.gitignore", - "tests/unit/gapic/automl_v1/__init__.py", - "tests/unit/gapic/automl_v1/test_auto_ml.py", - "tests/unit/gapic/automl_v1/test_prediction_service.py", - "tests/unit/gapic/automl_v1beta1/__init__.py", - "tests/unit/gapic/automl_v1beta1/test_auto_ml.py", - "tests/unit/gapic/automl_v1beta1/test_prediction_service.py" ] } \ No newline at end of file diff --git a/synth.py b/synth.py index 638228ec..1dc95a70 100644 --- a/synth.py +++ b/synth.py @@ -37,7 +37,7 @@ ) - s.move(library, excludes=["README.rst", "docs/index.rst", "setup.py"]) + s.move(library, excludes=["README.rst", "docs/index.rst", "setup.py", "*.tar.gz"]) # Add TablesClient and GcsClient to v1beta1 s.replace( @@ -61,8 +61,8 @@ """(google\.cloud\.automl_v1beta1\.services\.prediction_service :members: :inherited-members:)""", - """\g<1>\n.. automodule:: google.cloud.automl_v1beta1.services.tables - :members: + """\g<1>\n.. automodule:: google.cloud.automl_v1beta1.services.tables + :members: :inherited-members:""" ) @@ -70,7 +70,9 @@ # Add templated files # ---------------------------------------------------------------------------- templated_files = common.py_library( - unit_cov_level=82, cov_level=83, samples=True, microgenerator=True + unit_cov_level=82, cov_level=83, samples=True, microgenerator=True, + unit_test_extras=["pandas", "storage"], + system_test_extras=["pandas", "storage"] ) python.py_samples(skip_readmes=True) @@ -81,9 +83,9 @@ s.replace("noxfile.py", """['"]sphinx['"]""", '"sphinx<3.0.0"') # TODO(busunkim): Remove after microgenerator transition. # This is being added to AutoML because the proto comments are long and -# regex replaces are a brittle temporary solution. +# regex replaces are a brittle temporary solution. 
s.replace( -"noxfile.py", +"noxfile.py", """'-W', # warnings as errors \s+'-T', \# show full traceback on exception""", """"-T", # show full traceback on exception""") diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt new file mode 100644 index 00000000..e69de29b diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt new file mode 100644 index 00000000..e69de29b diff --git a/testing/constraints-3.6.txt b/testing/constraints-3.6.txt new file mode 100644 index 00000000..580415db --- /dev/null +++ b/testing/constraints-3.6.txt @@ -0,0 +1,12 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List *all* library dependencies and extras in this file. +# Pin the version to the lower bound. +# +# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", +# Then this file should have foo==1.14.0 +google-api-core==1.22.2 +proto-plus==1.10.0 +libcst==0.2.5 +pandas==0.23.0 +google-cloud-storage==1.18.0 diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt new file mode 100644 index 00000000..e69de29b diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt new file mode 100644 index 00000000..e69de29b diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/gapic/automl_v1/__init__.py b/tests/unit/gapic/automl_v1/__init__.py index 8b137891..42ffdf2b 100644 --- a/tests/unit/gapic/automl_v1/__init__.py +++ b/tests/unit/gapic/automl_v1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/tests/unit/gapic/automl_v1/test_auto_ml.py b/tests/unit/gapic/automl_v1/test_auto_ml.py index ccf60d0e..c00be626 100644 --- a/tests/unit/gapic/automl_v1/test_auto_ml.py +++ b/tests/unit/gapic/automl_v1/test_auto_ml.py @@ -99,7 +99,22 @@ def test__get_default_mtls_endpoint(): assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient]) +@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,]) +def test_auto_ml_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "automl.googleapis.com:443" + + +@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,]) def test_auto_ml_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -108,16 +123,21 @@ def test_auto_ml_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "automl.googleapis.com:443" def test_auto_ml_client_get_transport_class(): transport = AutoMlClient.get_transport_class() - assert transport == transports.AutoMlGrpcTransport + available_transports = [ + transports.AutoMlGrpcTransport, + ] + assert transport in available_transports transport = AutoMlClient.get_transport_class("grpc") assert transport == transports.AutoMlGrpcTransport @@ -158,7 +178,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -174,7 +194,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -190,7 +210,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -218,7 +238,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -265,29 +285,25 @@ def test_auto_ml_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - 
"grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. @@ -296,66 +312,53 @@ def test_auto_ml_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -377,7 +380,7 @@ def test_auto_ml_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -403,7 +406,7 @@ def test_auto_ml_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -420,7 +423,7 @@ def test_auto_ml_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -458,6 +461,22 @@ def test_create_dataset_from_dict(): test_create_dataset(request_type=dict) +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateDatasetRequest() + + @pytest.mark.asyncio async def test_create_dataset_async( transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest @@ -700,6 +719,22 @@ def test_get_dataset_from_dict(): test_get_dataset(request_type=dict) +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetDatasetRequest() + + @pytest.mark.asyncio async def test_get_dataset_async( transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest @@ -903,6 +938,22 @@ def test_list_datasets_from_dict(): test_list_datasets(request_type=dict) +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListDatasetsRequest() + + @pytest.mark.asyncio async def test_list_datasets_async( transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest @@ -1233,6 +1284,22 @@ def test_update_dataset_from_dict(): test_update_dataset(request_type=dict) +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateDatasetRequest() + + @pytest.mark.asyncio async def test_update_dataset_async( transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest @@ -1473,6 +1540,22 @@ def test_delete_dataset_from_dict(): test_delete_dataset(request_type=dict) +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteDatasetRequest() + + @pytest.mark.asyncio async def test_delete_dataset_async( transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest @@ -1657,6 +1740,22 @@ def test_import_data_from_dict(): test_import_data(request_type=dict) +def test_import_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call: + client.import_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ImportDataRequest() + + @pytest.mark.asyncio async def test_import_data_async( transport: str = "grpc_asyncio", request_type=service.ImportDataRequest @@ -1867,6 +1966,22 @@ def test_export_data_from_dict(): test_export_data(request_type=dict) +def test_export_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.export_data), "__call__") as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportDataRequest() + + @pytest.mark.asyncio async def test_export_data_async( transport: str = "grpc_asyncio", request_type=service.ExportDataRequest @@ -2102,6 +2217,24 @@ def test_get_annotation_spec_from_dict(): test_get_annotation_spec(request_type=dict) +def test_get_annotation_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), "__call__" + ) as call: + client.get_annotation_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetAnnotationSpecRequest() + + @pytest.mark.asyncio async def test_get_annotation_spec_async( transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest @@ -2306,6 +2439,22 @@ def test_create_model_from_dict(): test_create_model(request_type=dict) +def test_create_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_model), "__call__") as call: + client.create_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateModelRequest() + + @pytest.mark.asyncio async def test_create_model_async( transport: str = "grpc_asyncio", request_type=service.CreateModelRequest @@ -2548,6 +2697,22 @@ def test_get_model_from_dict(): test_get_model(request_type=dict) +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
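The *_empty_call tests being added above (and repeated for every RPC below) all share one shape: build a client with anonymous credentials, patch the transport stub's __call__, invoke the client method with no arguments, and assert that the stub received the default request message. A minimal, assumption-level helper capturing that shape; the helper is illustrative only and not part of this change, and it assumes the GAPIC convention that the transport exposes an attribute named after each client method:

from unittest import mock


def assert_empty_call_sends_default_request(client, method_name, request_type):
    """Call client.<method_name>() with no arguments and check that the
    transport stub received the default (empty) request message."""
    stub_callable = getattr(client.transport, method_name)
    with mock.patch.object(type(stub_callable), "__call__") as call:
        getattr(client, method_name)()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == request_type()

# e.g. assert_empty_call_sends_default_request(
#          client, "create_dataset", service.CreateDatasetRequest)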
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelRequest() + + @pytest.mark.asyncio async def test_get_model_async( transport: str = "grpc_asyncio", request_type=service.GetModelRequest @@ -2749,6 +2914,22 @@ def test_list_models_from_dict(): test_list_models(request_type=dict) +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelsRequest() + + @pytest.mark.asyncio async def test_list_models_async( transport: str = "grpc_asyncio", request_type=service.ListModelsRequest @@ -3041,6 +3222,22 @@ def test_delete_model_from_dict(): test_delete_model(request_type=dict) +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteModelRequest() + + @pytest.mark.asyncio async def test_delete_model_async( transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest @@ -3245,6 +3442,22 @@ def test_update_model_from_dict(): test_update_model(request_type=dict) +def test_update_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_model), "__call__") as call: + client.update_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateModelRequest() + + @pytest.mark.asyncio async def test_update_model_async( transport: str = "grpc_asyncio", request_type=service.UpdateModelRequest @@ -3479,6 +3692,22 @@ def test_deploy_model_from_dict(): test_deploy_model(request_type=dict) +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeployModelRequest() + + @pytest.mark.asyncio async def test_deploy_model_async( transport: str = "grpc_asyncio", request_type=service.DeployModelRequest @@ -3665,6 +3894,22 @@ def test_undeploy_model_from_dict(): test_undeploy_model(request_type=dict) +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UndeployModelRequest() + + @pytest.mark.asyncio async def test_undeploy_model_async( transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest @@ -3849,6 +4094,22 @@ def test_export_model_from_dict(): test_export_model(request_type=dict) +def test_export_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.export_model), "__call__") as call: + client.export_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportModelRequest() + + @pytest.mark.asyncio async def test_export_model_async( transport: str = "grpc_asyncio", request_type=service.ExportModelRequest @@ -4092,6 +4353,24 @@ def test_get_model_evaluation_from_dict(): test_get_model_evaluation(request_type=dict) +def test_get_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), "__call__" + ) as call: + client.get_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelEvaluationRequest() + + @pytest.mark.asyncio async def test_get_model_evaluation_async( transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest @@ -4308,6 +4587,24 @@ def test_list_model_evaluations_from_dict(): test_list_model_evaluations(request_type=dict) +def test_list_model_evaluations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_model_evaluations), "__call__" + ) as call: + client.list_model_evaluations() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelEvaluationsRequest() + + @pytest.mark.asyncio async def test_list_model_evaluations_async( transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest @@ -4714,7 +5011,7 @@ def test_transport_get_channel(): @pytest.mark.parametrize( "transport_class", - [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], + [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. @@ -4836,6 +5133,48 @@ def test_auto_ml_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], +) +def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_auto_ml_host_no_port(): client = AutoMlClient( credentials=credentials.AnonymousCredentials(), @@ -4857,7 +5196,7 @@ def test_auto_ml_host_with_port(): def test_auto_ml_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AutoMlGrpcTransport( @@ -4869,7 +5208,7 @@ def test_auto_ml_grpc_transport_channel(): def test_auto_ml_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.AutoMlGrpcAsyncIOTransport( @@ -4880,6 +5219,8 @@ def test_auto_ml_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
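The new test_auto_ml_grpc_transport_client_cert_source_for_mtls test above checks two paths: an explicit ssl_channel_credentials object is forwarded to create_channel unchanged, and otherwise a client_cert_source_for_mtls callback is turned into gRPC SSL channel credentials. A minimal sketch of that second path, assuming the callback returns a (certificate PEM, private key PEM) pair of bytes; the function name and endpoint are illustrative, not the library's code:

import grpc


def channel_credentials_from_cert_source(client_cert_source):
    # The callback yields PEM-encoded bytes for the client certificate and key.
    cert_pem, key_pem = client_cert_source()
    return grpc.ssl_channel_credentials(
        certificate_chain=cert_pem, private_key=key_pem
    )

# Example wiring (real PEM material is required for an actual connection):
# creds = channel_credentials_from_cert_source(my_cert_source)
# channel = grpc.secure_channel("automl.mtls.googleapis.com:443", creds)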
@pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], @@ -4889,7 +5230,7 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class) "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -4927,6 +5268,8 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class) assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], @@ -4939,7 +5282,7 @@ def test_auto_ml_transport_channel_mtls_with_adc(transport_class): ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/automl_v1/test_prediction_service.py b/tests/unit/gapic/automl_v1/test_prediction_service.py index 72e62d0f..e3d6dfd2 100644 --- a/tests/unit/gapic/automl_v1/test_prediction_service.py +++ b/tests/unit/gapic/automl_v1/test_prediction_service.py @@ -97,7 +97,24 @@ def test__get_default_mtls_endpoint(): @pytest.mark.parametrize( - "client_class", [PredictionServiceClient, PredictionServiceAsyncClient] + "client_class", [PredictionServiceClient, PredictionServiceAsyncClient,] +) +def test_prediction_service_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "automl.googleapis.com:443" + + +@pytest.mark.parametrize( + "client_class", [PredictionServiceClient, PredictionServiceAsyncClient,] ) def test_prediction_service_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() @@ -107,16 +124,21 @@ def test_prediction_service_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "automl.googleapis.com:443" def test_prediction_service_client_get_transport_class(): transport = PredictionServiceClient.get_transport_class() - assert transport == transports.PredictionServiceGrpcTransport + available_transports = [ + transports.PredictionServiceGrpcTransport, + ] + assert transport in available_transports transport = PredictionServiceClient.get_transport_class("grpc") assert transport == transports.PredictionServiceGrpcTransport @@ -167,7 +189,7 @@ def test_prediction_service_client_client_options( 
credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -183,7 +205,7 @@ def test_prediction_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -199,7 +221,7 @@ def test_prediction_service_client_client_options( credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -227,7 +249,7 @@ def test_prediction_service_client_client_options( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -288,29 +310,25 @@ def test_prediction_service_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
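The assertions in this test family encode how the client is expected to pick its endpoint and client certificate source from GOOGLE_API_USE_CLIENT_CERTIFICATE, an explicit client_cert_source on client_options, and the ADC default client certificate. A simplified, assumption-level sketch of that decision, mirroring only what these assertions check (the real client also honors other client_options settings):

import os

from google.auth.transport import mtls


def resolve_endpoint_and_cert_source(default_endpoint, mtls_endpoint, client_cert_source=None):
    use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
    if not use_client_cert:
        return default_endpoint, None
    if client_cert_source is not None:
        return mtls_endpoint, client_cert_source
    if mtls.has_default_client_cert_source():
        return mtls_endpoint, mtls.default_client_cert_source()
    return default_endpoint, None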
@@ -319,66 +337,53 @@ def test_prediction_service_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -404,7 +409,7 @@ def test_prediction_service_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -434,7 +439,7 @@ def test_prediction_service_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -453,7 +458,7 @@ def test_prediction_service_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -492,6 +497,22 @@ def test_predict_from_dict(): test_predict(request_type=dict) +def test_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.predict), "__call__") as call: + client.predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == prediction_service.PredictRequest() + + @pytest.mark.asyncio async def test_predict_async( transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest @@ -718,6 +739,22 @@ def test_batch_predict_from_dict(): test_batch_predict(request_type=dict) +def test_batch_predict_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = PredictionServiceClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call: + client.batch_predict() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == prediction_service.BatchPredictRequest() + + @pytest.mark.asyncio async def test_batch_predict_async( transport: str = "grpc_asyncio", request_type=prediction_service.BatchPredictRequest @@ -1109,6 +1146,51 @@ def test_prediction_service_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [ + transports.PredictionServiceGrpcTransport, + transports.PredictionServiceGrpcAsyncIOTransport, + ], +) +def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback, + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, private_key=expected_key + ) + + def test_prediction_service_host_no_port(): client = PredictionServiceClient( credentials=credentials.AnonymousCredentials(), @@ -1130,7 +1212,7 @@ def test_prediction_service_host_with_port(): def test_prediction_service_grpc_transport_channel(): - channel = grpc.insecure_channel("http://localhost/") + channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.PredictionServiceGrpcTransport( @@ -1142,7 +1224,7 @@ def test_prediction_service_grpc_transport_channel(): def test_prediction_service_grpc_asyncio_transport_channel(): - channel = aio.insecure_channel("http://localhost/") + channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials()) # Check that channel is used if provided. transport = transports.PredictionServiceGrpcAsyncIOTransport( @@ -1153,6 +1235,8 @@ def test_prediction_service_grpc_asyncio_transport_channel(): assert transport._ssl_channel_credentials == None +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
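The transport-channel fixtures above now hand the transports a channel built with grpc.secure_channel and grpc.local_channel_credentials rather than grpc.insecure_channel. A standalone equivalent of that fixture setup; the target address is illustrative, and local channel credentials only authenticate local connections such as Unix domain sockets or localhost TCP:

import grpc

# Lazily created; nothing connects until an RPC is attempted.
channel = grpc.secure_channel("localhost:50051", grpc.local_channel_credentials())
channel.close()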
@pytest.mark.parametrize( "transport_class", [ @@ -1167,7 +1251,7 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source( "grpc.ssl_channel_credentials", autospec=True ) as grpc_ssl_channel_cred: with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_ssl_cred = mock.Mock() grpc_ssl_channel_cred.return_value = mock_ssl_cred @@ -1205,6 +1289,8 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source( assert transport._ssl_channel_credentials == mock_ssl_cred +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. @pytest.mark.parametrize( "transport_class", [ @@ -1220,7 +1306,7 @@ def test_prediction_service_transport_channel_mtls_with_adc(transport_class): ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): with mock.patch.object( - transport_class, "create_channel", autospec=True + transport_class, "create_channel" ) as grpc_create_channel: mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel diff --git a/tests/unit/gapic/automl_v1beta1/__init__.py b/tests/unit/gapic/automl_v1beta1/__init__.py index 8b137891..42ffdf2b 100644 --- a/tests/unit/gapic/automl_v1beta1/__init__.py +++ b/tests/unit/gapic/automl_v1beta1/__init__.py @@ -1 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py index 69ac24b1..31e45e5c 100644 --- a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py +++ b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py @@ -108,7 +108,22 @@ def test__get_default_mtls_endpoint(): assert AutoMlClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi -@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient]) +@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,]) +def test_auto_ml_client_from_service_account_info(client_class): + creds = credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == "automl.googleapis.com:443" + + +@pytest.mark.parametrize("client_class", [AutoMlClient, AutoMlAsyncClient,]) def test_auto_ml_client_from_service_account_file(client_class): creds = credentials.AnonymousCredentials() with mock.patch.object( @@ -117,16 +132,21 @@ def test_auto_ml_client_from_service_account_file(client_class): factory.return_value = creds client = client_class.from_service_account_file("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) client = client_class.from_service_account_json("dummy/file/path.json") assert client.transport._credentials == creds + assert isinstance(client, client_class) assert client.transport._host == "automl.googleapis.com:443" def test_auto_ml_client_get_transport_class(): transport = AutoMlClient.get_transport_class() - assert transport == transports.AutoMlGrpcTransport + available_transports = [ + transports.AutoMlGrpcTransport, + ] + assert transport in available_transports transport = AutoMlClient.get_transport_class("grpc") assert transport == transports.AutoMlGrpcTransport @@ -167,7 +187,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -183,7 +203,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -199,7 +219,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -227,7 +247,7 @@ def test_auto_ml_client_client_options(client_class, transport_class, transport_ credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -274,29 +294,25 @@ def test_auto_ml_client_mtls_env_auto( client_cert_source=client_cert_source_callback ) with mock.patch.object(transport_class, "__init__") as patched: - ssl_channel_creds = mock.Mock() - with 
mock.patch( - "grpc.ssl_channel_credentials", return_value=ssl_channel_creds - ): - patched.return_value = None - client = client_class(client_options=options) + patched.return_value = None + client = client_class(client_options=options) - if use_client_cert_env == "false": - expected_ssl_channel_creds = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_ssl_channel_creds = ssl_channel_creds - expected_host = client.DEFAULT_MTLS_ENDPOINT + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) # Check the case ADC client cert is provided. Whether client cert is used depends on # GOOGLE_API_USE_CLIENT_CERTIFICATE value. @@ -305,66 +321,53 @@ def test_auto_ml_client_mtls_env_auto( ): with mock.patch.object(transport_class, "__init__") as patched: with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, ): with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.ssl_credentials", - new_callable=mock.PropertyMock, - ) as ssl_credentials_mock: - if use_client_cert_env == "false": - is_mtls_mock.return_value = False - ssl_credentials_mock.return_value = None - expected_host = client.DEFAULT_ENDPOINT - expected_ssl_channel_creds = None - else: - is_mtls_mock.return_value = True - ssl_credentials_mock.return_value = mock.Mock() - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_ssl_channel_creds = ( - ssl_credentials_mock.return_value - ) - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - ssl_channel_credentials=expected_ssl_channel_creds, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - ) + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict( - os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} - ): - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.grpc.SslCredentials.__init__", return_value=None - ): - with mock.patch( - "google.auth.transport.grpc.SslCredentials.is_mtls", - new_callable=mock.PropertyMock, - ) as is_mtls_mock: - is_mtls_mock.return_value = False patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=expected_host, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -386,7 +389,7 @@ def test_auto_ml_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -412,7 +415,7 @@ def test_auto_ml_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -429,7 +432,7 @@ def test_auto_ml_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - ssl_channel_credentials=None, + client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -487,6 +490,22 @@ def test_create_dataset_from_dict(): test_create_dataset(request_type=dict) +def test_create_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_dataset), "__call__") as call: + client.create_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateDatasetRequest() + + @pytest.mark.asyncio async def test_create_dataset_async( transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest @@ -741,6 +760,22 @@ def test_get_dataset_from_dict(): test_get_dataset(request_type=dict) +def test_get_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_dataset), "__call__") as call: + client.get_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetDatasetRequest() + + @pytest.mark.asyncio async def test_get_dataset_async( transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest @@ -944,6 +979,22 @@ def test_list_datasets_from_dict(): test_list_datasets(request_type=dict) +def test_list_datasets_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_datasets), "__call__") as call: + client.list_datasets() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListDatasetsRequest() + + @pytest.mark.asyncio async def test_list_datasets_async( transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest @@ -1274,6 +1325,22 @@ def test_update_dataset_from_dict(): test_update_dataset(request_type=dict) +def test_update_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.update_dataset), "__call__") as call: + client.update_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateDatasetRequest() + + @pytest.mark.asyncio async def test_update_dataset_async( transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest @@ -1506,6 +1573,22 @@ def test_delete_dataset_from_dict(): test_delete_dataset(request_type=dict) +def test_delete_dataset_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call: + client.delete_dataset() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteDatasetRequest() + + @pytest.mark.asyncio async def test_delete_dataset_async( transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest @@ -1690,6 +1773,22 @@ def test_import_data_from_dict(): test_import_data(request_type=dict) +def test_import_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call: + client.import_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ImportDataRequest() + + @pytest.mark.asyncio async def test_import_data_async( transport: str = "grpc_asyncio", request_type=service.ImportDataRequest @@ -1900,6 +1999,22 @@ def test_export_data_from_dict(): test_export_data(request_type=dict) +def test_export_data_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.export_data), "__call__") as call: + client.export_data() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportDataRequest() + + @pytest.mark.asyncio async def test_export_data_async( transport: str = "grpc_asyncio", request_type=service.ExportDataRequest @@ -2135,6 +2250,24 @@ def test_get_annotation_spec_from_dict(): test_get_annotation_spec(request_type=dict) +def test_get_annotation_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_annotation_spec), "__call__" + ) as call: + client.get_annotation_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetAnnotationSpecRequest() + + @pytest.mark.asyncio async def test_get_annotation_spec_async( transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest @@ -2361,6 +2494,22 @@ def test_get_table_spec_from_dict(): test_get_table_spec(request_type=dict) +def test_get_table_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call: + client.get_table_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetTableSpecRequest() + + @pytest.mark.asyncio async def test_get_table_spec_async( transport: str = "grpc_asyncio", request_type=service.GetTableSpecRequest @@ -2571,6 +2720,22 @@ def test_list_table_specs_from_dict(): test_list_table_specs(request_type=dict) +def test_list_table_specs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call: + client.list_table_specs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListTableSpecsRequest() + + @pytest.mark.asyncio async def test_list_table_specs_async( transport: str = "grpc_asyncio", request_type=service.ListTableSpecsRequest @@ -2919,6 +3084,24 @@ def test_update_table_spec_from_dict(): test_update_table_spec(request_type=dict) +def test_update_table_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table_spec), "__call__" + ) as call: + client.update_table_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateTableSpecRequest() + + @pytest.mark.asyncio async def test_update_table_spec_async( transport: str = "grpc_asyncio", request_type=service.UpdateTableSpecRequest @@ -3153,6 +3336,22 @@ def test_get_column_spec_from_dict(): test_get_column_spec(request_type=dict) +def test_get_column_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call: + client.get_column_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetColumnSpecRequest() + + @pytest.mark.asyncio async def test_get_column_spec_async( transport: str = "grpc_asyncio", request_type=service.GetColumnSpecRequest @@ -3354,6 +3553,24 @@ def test_list_column_specs_from_dict(): test_list_column_specs(request_type=dict) +def test_list_column_specs_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_column_specs), "__call__" + ) as call: + client.list_column_specs() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListColumnSpecsRequest() + + @pytest.mark.asyncio async def test_list_column_specs_async( transport: str = "grpc_asyncio", request_type=service.ListColumnSpecsRequest @@ -3709,6 +3926,24 @@ def test_update_column_spec_from_dict(): test_update_column_spec(request_type=dict) +def test_update_column_spec_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.update_column_spec), "__call__" + ) as call: + client.update_column_spec() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UpdateColumnSpecRequest() + + @pytest.mark.asyncio async def test_update_column_spec_async( transport: str = "grpc_asyncio", request_type=service.UpdateColumnSpecRequest @@ -3921,6 +4156,22 @@ def test_create_model_from_dict(): test_create_model(request_type=dict) +def test_create_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.create_model), "__call__") as call: + client.create_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.CreateModelRequest() + + @pytest.mark.asyncio async def test_create_model_async( transport: str = "grpc_asyncio", request_type=service.CreateModelRequest @@ -4160,6 +4411,22 @@ def test_get_model_from_dict(): test_get_model(request_type=dict) +def test_get_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_model), "__call__") as call: + client.get_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelRequest() + + @pytest.mark.asyncio async def test_get_model_async( transport: str = "grpc_asyncio", request_type=service.GetModelRequest @@ -4358,6 +4625,22 @@ def test_list_models_from_dict(): test_list_models(request_type=dict) +def test_list_models_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.list_models), "__call__") as call: + client.list_models() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelsRequest() + + @pytest.mark.asyncio async def test_list_models_async( transport: str = "grpc_asyncio", request_type=service.ListModelsRequest @@ -4650,6 +4933,22 @@ def test_delete_model_from_dict(): test_delete_model(request_type=dict) +def test_delete_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call: + client.delete_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeleteModelRequest() + + @pytest.mark.asyncio async def test_delete_model_async( transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest @@ -4834,6 +5133,22 @@ def test_deploy_model_from_dict(): test_deploy_model(request_type=dict) +def test_deploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.deploy_model), "__call__") as call: + client.deploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.DeployModelRequest() + + @pytest.mark.asyncio async def test_deploy_model_async( transport: str = "grpc_asyncio", request_type=service.DeployModelRequest @@ -5020,6 +5335,22 @@ def test_undeploy_model_from_dict(): test_undeploy_model(request_type=dict) +def test_undeploy_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call: + client.undeploy_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.UndeployModelRequest() + + @pytest.mark.asyncio async def test_undeploy_model_async( transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest @@ -5204,6 +5535,22 @@ def test_export_model_from_dict(): test_export_model(request_type=dict) +def test_export_model_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.export_model), "__call__") as call: + client.export_model() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportModelRequest() + + @pytest.mark.asyncio async def test_export_model_async( transport: str = "grpc_asyncio", request_type=service.ExportModelRequest @@ -5430,6 +5777,24 @@ def test_export_evaluated_examples_from_dict(): test_export_evaluated_examples(request_type=dict) +def test_export_evaluated_examples_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.export_evaluated_examples), "__call__" + ) as call: + client.export_evaluated_examples() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ExportEvaluatedExamplesRequest() + + @pytest.mark.asyncio async def test_export_evaluated_examples_async( transport: str = "grpc_asyncio", request_type=service.ExportEvaluatedExamplesRequest @@ -5679,6 +6044,24 @@ def test_get_model_evaluation_from_dict(): test_get_model_evaluation(request_type=dict) +def test_get_model_evaluation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_model_evaluation), "__call__" + ) as call: + client.get_model_evaluation() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.GetModelEvaluationRequest() + + @pytest.mark.asyncio async def test_get_model_evaluation_async( transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest @@ -5895,6 +6278,24 @@ def test_list_model_evaluations_from_dict(): test_list_model_evaluations(request_type=dict) +def test_list_model_evaluations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = AutoMlClient( + credentials=credentials.AnonymousCredentials(), transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_model_evaluations), "__call__" + ) as call: + client.list_model_evaluations() + call.assert_called() + _, args, _ = call.mock_calls[0] + + assert args[0] == service.ListModelEvaluationsRequest() + + @pytest.mark.asyncio async def test_list_model_evaluations_async( transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest @@ -6289,7 +6690,7 @@ def test_transport_get_channel(): @pytest.mark.parametrize( "transport_class", - [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], + [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport,], ) def test_transport_adc(transport_class): # Test default credentials are used if not provided. @@ -6417,6 +6818,48 @@ def test_auto_ml_transport_auth_adc(): ) +@pytest.mark.parametrize( + "transport_class", + [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport], +) +def test_auto_ml_grpc_transport_client_cert_source_for_mtls(transport_class): + cred = credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds,
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=("https://www.googleapis.com/auth/cloud-platform",),
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback,
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert, private_key=expected_key
+            )
+
+
 def test_auto_ml_host_no_port():
     client = AutoMlClient(
         credentials=credentials.AnonymousCredentials(),
@@ -6438,7 +6881,7 @@ def test_auto_ml_host_with_port():
 
 
 def test_auto_ml_grpc_transport_channel():
-    channel = grpc.insecure_channel("http://localhost/")
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
 
     # Check that channel is used if provided.
     transport = transports.AutoMlGrpcTransport(
@@ -6450,7 +6893,7 @@ def test_auto_ml_grpc_transport_channel():
 
 
 def test_auto_ml_grpc_asyncio_transport_channel():
-    channel = aio.insecure_channel("http://localhost/")
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
 
     # Check that channel is used if provided.
     transport = transports.AutoMlGrpcAsyncIOTransport(
@@ -6461,6 +6904,8 @@ def test_auto_ml_grpc_asyncio_transport_channel():
     assert transport._ssl_channel_credentials == None
 
 
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
 @pytest.mark.parametrize(
     "transport_class",
     [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport],
 )
@@ -6470,7 +6915,7 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
         "grpc.ssl_channel_credentials", autospec=True
     ) as grpc_ssl_channel_cred:
         with mock.patch.object(
-            transport_class, "create_channel", autospec=True
+            transport_class, "create_channel"
         ) as grpc_create_channel:
             mock_ssl_cred = mock.Mock()
             grpc_ssl_channel_cred.return_value = mock_ssl_cred
@@ -6508,6 +6953,8 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
     assert transport._ssl_channel_credentials == mock_ssl_cred
 
 
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
 @pytest.mark.parametrize(
     "transport_class",
     [transports.AutoMlGrpcTransport, transports.AutoMlGrpcAsyncIOTransport],
 )
@@ -6520,7 +6967,7 @@ def test_auto_ml_transport_channel_mtls_with_adc(transport_class):
         ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
     ):
         with mock.patch.object(
-            transport_class, "create_channel", autospec=True
+            transport_class, "create_channel"
         ) as grpc_create_channel:
             mock_grpc_channel = mock.Mock()
             grpc_create_channel.return_value = mock_grpc_channel
diff --git a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
index 2cf567f7..633ffac9 100644
--- a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
@@ -100,7 +100,24 @@ def test__get_default_mtls_endpoint():
 
 
 @pytest.mark.parametrize(
-    "client_class", [PredictionServiceClient, PredictionServiceAsyncClient]
+    "client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
+)
+def test_prediction_service_client_from_service_account_info(client_class):
+    creds = credentials.AnonymousCredentials()
+    with mock.patch.object(
+        service_account.Credentials, "from_service_account_info"
+    ) as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == "automl.googleapis.com:443"
+
+
+@pytest.mark.parametrize(
+    "client_class", [PredictionServiceClient, PredictionServiceAsyncClient,]
 )
 def test_prediction_service_client_from_service_account_file(client_class):
     creds = credentials.AnonymousCredentials()
@@ -110,16 +127,21 @@ def test_prediction_service_client_from_service_account_file(client_class):
         factory.return_value = creds
         client = client_class.from_service_account_file("dummy/file/path.json")
         assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
 
         client = client_class.from_service_account_json("dummy/file/path.json")
         assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
 
         assert client.transport._host == "automl.googleapis.com:443"
 
 
 def test_prediction_service_client_get_transport_class():
     transport = PredictionServiceClient.get_transport_class()
-    assert transport == transports.PredictionServiceGrpcTransport
+    available_transports = [
+        transports.PredictionServiceGrpcTransport,
+    ]
+    assert transport in available_transports
 
     transport = PredictionServiceClient.get_transport_class("grpc")
     assert transport == transports.PredictionServiceGrpcTransport
@@ -170,7 +192,7 @@ def test_prediction_service_client_client_options(
             credentials_file=None,
             host="squid.clam.whelk",
             scopes=None,
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
             quota_project_id=None,
             client_info=transports.base.DEFAULT_CLIENT_INFO,
         )
@@ -186,7 +208,7 @@ def test_prediction_service_client_client_options(
             credentials_file=None,
             host=client.DEFAULT_ENDPOINT,
             scopes=None,
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
             quota_project_id=None,
             client_info=transports.base.DEFAULT_CLIENT_INFO,
         )
@@ -202,7 +224,7 @@ def test_prediction_service_client_client_options(
             credentials_file=None,
             host=client.DEFAULT_MTLS_ENDPOINT,
             scopes=None,
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
             quota_project_id=None,
             client_info=transports.base.DEFAULT_CLIENT_INFO,
         )
@@ -230,7 +252,7 @@ def test_prediction_service_client_client_options(
             credentials_file=None,
             host=client.DEFAULT_ENDPOINT,
             scopes=None,
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
             quota_project_id="octopus",
             client_info=transports.base.DEFAULT_CLIENT_INFO,
         )
@@ -291,29 +313,25 @@ def test_prediction_service_client_mtls_env_auto(
             client_cert_source=client_cert_source_callback
         )
         with mock.patch.object(transport_class, "__init__") as patched:
-            ssl_channel_creds = mock.Mock()
-            with mock.patch(
-                "grpc.ssl_channel_credentials", return_value=ssl_channel_creds
-            ):
-                patched.return_value = None
-                client = client_class(client_options=options)
+            patched.return_value = None
+            client = client_class(client_options=options)
 
-                if use_client_cert_env == "false":
-                    expected_ssl_channel_creds = None
-                    expected_host = client.DEFAULT_ENDPOINT
-                else:
-                    expected_ssl_channel_creds = ssl_channel_creds
-                    expected_host = client.DEFAULT_MTLS_ENDPOINT
+            if use_client_cert_env == "false":
+                expected_client_cert_source = None
+                expected_host = client.DEFAULT_ENDPOINT
+            else:
+                expected_client_cert_source = client_cert_source_callback
+                expected_host = client.DEFAULT_MTLS_ENDPOINT
 
-                patched.assert_called_once_with(
-                    credentials=None,
-                    credentials_file=None,
-                    host=expected_host,
-                    scopes=None,
-                    ssl_channel_credentials=expected_ssl_channel_creds,
-                    quota_project_id=None,
-                    client_info=transports.base.DEFAULT_CLIENT_INFO,
-                )
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=expected_host,
+                scopes=None,
+                client_cert_source_for_mtls=expected_client_cert_source,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+            )
 
     # Check the case ADC client cert is provided. Whether client cert is used depends on
     # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
@@ -322,66 +340,53 @@ def test_prediction_service_client_mtls_env_auto(
     ):
         with mock.patch.object(transport_class, "__init__") as patched:
             with mock.patch(
-                "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
+                "google.auth.transport.mtls.has_default_client_cert_source",
+                return_value=True,
             ):
                 with mock.patch(
-                    "google.auth.transport.grpc.SslCredentials.is_mtls",
-                    new_callable=mock.PropertyMock,
-                ) as is_mtls_mock:
-                    with mock.patch(
-                        "google.auth.transport.grpc.SslCredentials.ssl_credentials",
-                        new_callable=mock.PropertyMock,
-                    ) as ssl_credentials_mock:
-                        if use_client_cert_env == "false":
-                            is_mtls_mock.return_value = False
-                            ssl_credentials_mock.return_value = None
-                            expected_host = client.DEFAULT_ENDPOINT
-                            expected_ssl_channel_creds = None
-                        else:
-                            is_mtls_mock.return_value = True
-                            ssl_credentials_mock.return_value = mock.Mock()
-                            expected_host = client.DEFAULT_MTLS_ENDPOINT
-                            expected_ssl_channel_creds = (
-                                ssl_credentials_mock.return_value
-                            )
-
-                        patched.return_value = None
-                        client = client_class()
-                        patched.assert_called_once_with(
-                            credentials=None,
-                            credentials_file=None,
-                            host=expected_host,
-                            scopes=None,
-                            ssl_channel_credentials=expected_ssl_channel_creds,
-                            quota_project_id=None,
-                            client_info=transports.base.DEFAULT_CLIENT_INFO,
-                        )
+                    "google.auth.transport.mtls.default_client_cert_source",
+                    return_value=client_cert_source_callback,
+                ):
+                    if use_client_cert_env == "false":
+                        expected_host = client.DEFAULT_ENDPOINT
+                        expected_client_cert_source = None
+                    else:
+                        expected_host = client.DEFAULT_MTLS_ENDPOINT
+                        expected_client_cert_source = client_cert_source_callback
 
-    # Check the case client_cert_source and ADC client cert are not provided.
-    with mock.patch.dict(
-        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
-    ):
-        with mock.patch.object(transport_class, "__init__") as patched:
-            with mock.patch(
-                "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
-            ):
-                with mock.patch(
-                    "google.auth.transport.grpc.SslCredentials.is_mtls",
-                    new_callable=mock.PropertyMock,
-                ) as is_mtls_mock:
-                    is_mtls_mock.return_value = False
                     patched.return_value = None
                     client = client_class()
                     patched.assert_called_once_with(
                         credentials=None,
                         credentials_file=None,
-                        host=client.DEFAULT_ENDPOINT,
+                        host=expected_host,
                         scopes=None,
-                        ssl_channel_credentials=None,
+                        client_cert_source_for_mtls=expected_client_cert_source,
                         quota_project_id=None,
                         client_info=transports.base.DEFAULT_CLIENT_INFO,
                     )
 
+    # Check the case client_cert_source and ADC client cert are not provided.
+    with mock.patch.dict(
+        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+    ):
+        with mock.patch.object(transport_class, "__init__") as patched:
+            with mock.patch(
+                "google.auth.transport.mtls.has_default_client_cert_source",
+                return_value=False,
+            ):
+                patched.return_value = None
+                client = client_class()
+                patched.assert_called_once_with(
+                    credentials=None,
+                    credentials_file=None,
+                    host=client.DEFAULT_ENDPOINT,
+                    scopes=None,
+                    client_cert_source_for_mtls=None,
+                    quota_project_id=None,
+                    client_info=transports.base.DEFAULT_CLIENT_INFO,
+                )
+
 
 @pytest.mark.parametrize(
     "client_class,transport_class,transport_name",
@@ -407,7 +412,7 @@ def test_prediction_service_client_client_options_scopes(
             credentials_file=None,
             host=client.DEFAULT_ENDPOINT,
             scopes=["1", "2"],
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
             quota_project_id=None,
             client_info=transports.base.DEFAULT_CLIENT_INFO,
         )
@@ -437,7 +442,7 @@ def test_prediction_service_client_client_options_credentials_file(
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@@ -456,7 +461,7 @@ def test_prediction_service_client_client_options_from_dict():
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
-            ssl_channel_credentials=None,
+            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
        )
@@ -495,6 +500,22 @@ def test_predict_from_dict():
     test_predict(request_type=dict)
 
 
+def test_predict_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PredictionServiceClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.predict), "__call__") as call:
+        client.predict()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == prediction_service.PredictRequest()
+
+
 @pytest.mark.asyncio
 async def test_predict_async(
     transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
@@ -721,6 +742,22 @@ def test_batch_predict_from_dict():
     test_batch_predict(request_type=dict)
 
 
+def test_batch_predict_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = PredictionServiceClient(
+        credentials=credentials.AnonymousCredentials(), transport="grpc",
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
+        client.batch_predict()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+
+        assert args[0] == prediction_service.BatchPredictRequest()
+
+
 @pytest.mark.asyncio
 async def test_batch_predict_async(
     transport: str = "grpc_asyncio", request_type=prediction_service.BatchPredictRequest
@@ -1112,6 +1149,51 @@ def test_prediction_service_transport_auth_adc():
     )
 
 
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.PredictionServiceGrpcTransport,
+        transports.PredictionServiceGrpcAsyncIOTransport,
+    ],
+)
+def test_prediction_service_grpc_transport_client_cert_source_for_mtls(transport_class):
+    cred = credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds,
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=("https://www.googleapis.com/auth/cloud-platform",),
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback,
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert, private_key=expected_key
+            )
+
+
 def test_prediction_service_host_no_port():
     client = PredictionServiceClient(
         credentials=credentials.AnonymousCredentials(),
@@ -1133,7 +1215,7 @@ def test_prediction_service_host_with_port():
 
 
 def test_prediction_service_grpc_transport_channel():
-    channel = grpc.insecure_channel("http://localhost/")
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
 
     # Check that channel is used if provided.
     transport = transports.PredictionServiceGrpcTransport(
@@ -1145,7 +1227,7 @@ def test_prediction_service_grpc_transport_channel():
 
 
 def test_prediction_service_grpc_asyncio_transport_channel():
-    channel = aio.insecure_channel("http://localhost/")
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
 
     # Check that channel is used if provided.
     transport = transports.PredictionServiceGrpcAsyncIOTransport(
@@ -1156,6 +1238,8 @@ def test_prediction_service_grpc_asyncio_transport_channel():
     assert transport._ssl_channel_credentials == None
 
 
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
 @pytest.mark.parametrize(
     "transport_class",
     [
@@ -1170,7 +1254,7 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
         "grpc.ssl_channel_credentials", autospec=True
     ) as grpc_ssl_channel_cred:
         with mock.patch.object(
-            transport_class, "create_channel", autospec=True
+            transport_class, "create_channel"
         ) as grpc_create_channel:
             mock_ssl_cred = mock.Mock()
             grpc_ssl_channel_cred.return_value = mock_ssl_cred
@@ -1208,6 +1292,8 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
     assert transport._ssl_channel_credentials == mock_ssl_cred
 
 
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
 @pytest.mark.parametrize(
     "transport_class",
     [
@@ -1223,7 +1309,7 @@ def test_prediction_service_transport_channel_mtls_with_adc(transport_class):
         ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
     ):
         with mock.patch.object(
-            transport_class, "create_channel", autospec=True
+            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
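For readers skimming the patch: the new `*_empty_call` tests it adds all follow one shape — patch the transport-level callable, invoke the client method with no arguments, and assert that a default request object was still constructed and sent. A minimal, self-contained sketch of that pattern follows; `ToyRequest`, `ToyClient`, and `_transport_rpc` are hypothetical stand-ins for the generated request types and GAPIC client, used only so the snippet runs without the google-cloud-automl package installed.

# Illustrative sketch of the "empty call" coverage pattern; the names below are
# invented for this example and are not part of the AutoML library or this patch.
from unittest import mock


class ToyRequest:
    def __eq__(self, other):
        return isinstance(other, ToyRequest)


class ToyClient:
    def rpc(self, request=None):
        # Like a GAPIC client, fall back to a default request when none is given.
        request = request or ToyRequest()
        return self._transport_rpc(request)

    def _transport_rpc(self, request):
        raise NotImplementedError


def test_rpc_empty_call():
    client = ToyClient()
    # Patch the transport-level call, mirroring mock.patch.object(...) in the diff.
    with mock.patch.object(ToyClient, "_transport_rpc") as call:
        client.rpc()
        call.assert_called()
        _, args, _ = call.mock_calls[0]

        # Even with no arguments, a default request should have been sent.
        assert args[0] == ToyRequest()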