diff --git a/.coveragerc b/.coveragerc
index dd39c854..0d8e6297 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -17,6 +17,8 @@
# Generated by synthtool. DO NOT EDIT!
[run]
branch = True
+omit =
+ google/cloud/__init__.py
[report]
fail_under = 100
@@ -32,4 +34,5 @@ omit =
*/gapic/*.py
*/proto/*.py
*/core/*.py
- */site-packages/*.py
\ No newline at end of file
+ */site-packages/*.py
+ google/cloud/__init__.py
diff --git a/.github/snippet-bot.yml b/.github/snippet-bot.yml
new file mode 100644
index 00000000..e69de29b
diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg
index ad294c14..d9b43554 100644
--- a/.kokoro/docs/common.cfg
+++ b/.kokoro/docs/common.cfg
@@ -30,7 +30,7 @@ env_vars: {
env_vars: {
key: "V2_STAGING_BUCKET"
- value: "docs-staging-v2-staging"
+ value: "docs-staging-v2"
}
# It will upload the docker image after successful builds.
diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh
new file mode 100755
index 00000000..f5251425
--- /dev/null
+++ b/.kokoro/populate-secrets.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2020 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;}
+function msg { println "$*" >&2 ;}
+function println { printf '%s\n' "$(now) $*" ;}
+
+
+# Populates requested secrets set in SECRET_MANAGER_KEYS from service account:
+# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com
+SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager"
+msg "Creating folder on disk for secrets: ${SECRET_LOCATION}"
+mkdir -p ${SECRET_LOCATION}
+for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g")
+do
+ msg "Retrieving secret ${key}"
+ docker run --entrypoint=gcloud \
+ --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \
+ gcr.io/google.com/cloudsdktool/cloud-sdk \
+ secrets versions access latest \
+ --project cloud-devrel-kokoro-resources \
+ --secret ${key} > \
+ "${SECRET_LOCATION}/${key}"
+ if [[ $? == 0 ]]; then
+ msg "Secret written to ${SECRET_LOCATION}/${key}"
+ else
+ msg "Error retrieving secret ${key}"
+ fi
+done
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
index a66e4d28..29fd838b 100644
--- a/.kokoro/release/common.cfg
+++ b/.kokoro/release/common.cfg
@@ -23,42 +23,18 @@ env_vars: {
value: "github/python-automl/.kokoro/release.sh"
}
-# Fetch the token needed for reporting release status to GitHub
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "yoshi-automation-github-key"
- }
- }
-}
-
-# Fetch PyPI password
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "google_cloud_pypi_password"
- }
- }
-}
-
-# Fetch magictoken to use with Magic Github Proxy
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "releasetool-magictoken"
- }
- }
+# Fetch PyPI password
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google_cloud_pypi_password"
+ }
+ }
}
-# Fetch api key to use with Magic Github Proxy
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "magic-github-proxy-api-key"
- }
- }
-}
+# Tokens needed to report release status back to GitHub
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg
index a67eebd6..01b2dc41 100644
--- a/.kokoro/samples/python3.6/common.cfg
+++ b/.kokoro/samples/python3.6/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.6"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py36"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-automl/.kokoro/test-samples.sh"
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg
index 6fa14a09..e3d87b2d 100644
--- a/.kokoro/samples/python3.7/common.cfg
+++ b/.kokoro/samples/python3.7/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.7"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py37"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-automl/.kokoro/test-samples.sh"
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg
index a74006d4..f11b07ad 100644
--- a/.kokoro/samples/python3.8/common.cfg
+++ b/.kokoro/samples/python3.8/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.8"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py38"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-automl/.kokoro/test-samples.sh"
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index 14c39db4..7fa763e0 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -28,6 +28,12 @@ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
git checkout $LATEST_RELEASE
fi
+# Exit early if samples directory doesn't exist
+if [ ! -d "./samples" ]; then
+  echo "No tests run. './samples' not found"
+ exit 0
+fi
+
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -101,4 +107,4 @@ cd "$ROOT"
# Workaround for Kokoro permissions issue: delete secrets
rm testing/{test-env.sh,client-secrets.json,service-account.json}
-exit "$RTN"
\ No newline at end of file
+exit "$RTN"
diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh
index e8c4251f..f39236e9 100755
--- a/.kokoro/trampoline.sh
+++ b/.kokoro/trampoline.sh
@@ -15,9 +15,14 @@
set -eo pipefail
-python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$?
+# Always run the cleanup script, regardless of the success of bouncing into
+# the container.
+function cleanup() {
+ chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ echo "cleanup";
+}
+trap cleanup EXIT
-chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
-${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true
-
-exit ${ret_code}
+$(dirname $0)/populate-secrets.sh # Secret Manager secrets.
+python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py"
\ No newline at end of file
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index b3d1f602..039f4368 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,44 +1,95 @@
-# Contributor Code of Conduct
+# Code of Conduct
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
+## Our Pledge
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 6d6bd916..db1a2cc2 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -80,25 +80,6 @@ We use `nox `__ to instrument our tests.
.. nox: https://pypi.org/project/nox/
-Note on Editable Installs / Develop Mode
-========================================
-
-- As mentioned previously, using ``setuptools`` in `develop mode`_
- or a ``pip`` `editable install`_ is not possible with this
- library. This is because this library uses `namespace packages`_.
- For context see `Issue #2316`_ and the relevant `PyPA issue`_.
-
- Since ``editable`` / ``develop`` mode can't be used, packages
- need to be installed directly. Hence your changes to the source
- tree don't get incorporated into the **already installed**
- package.
-
-.. _namespace packages: https://www.python.org/dev/peps/pep-0420/
-.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316
-.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12
-.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode
-.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
-
*****************************************
I'm getting weird errors... Can you help?
*****************************************
diff --git a/docs/automl_v1/types.rst b/docs/automl_v1/types.rst
index 47a76a80..14a31a9e 100644
--- a/docs/automl_v1/types.rst
+++ b/docs/automl_v1/types.rst
@@ -3,3 +3,4 @@ Types for Google Cloud Automl v1 API
.. automodule:: google.cloud.automl_v1.types
:members:
+ :show-inheritance:
diff --git a/docs/automl_v1beta1/types.rst b/docs/automl_v1beta1/types.rst
index bf190b5b..b50b55f6 100644
--- a/docs/automl_v1beta1/types.rst
+++ b/docs/automl_v1beta1/types.rst
@@ -3,3 +3,4 @@ Types for Google Cloud Automl v1beta1 API
.. automodule:: google.cloud.automl_v1beta1.types
:members:
+ :show-inheritance:
diff --git a/docs/conf.py b/docs/conf.py
index fcaca368..8eb98d14 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -29,7 +29,7 @@
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.6.3"
+needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -39,6 +39,7 @@
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
+ "sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
@@ -348,6 +349,7 @@
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
}
diff --git a/google/cloud/automl_v1/__init__.py b/google/cloud/automl_v1/__init__.py
index 6f22bb65..b5f76f81 100644
--- a/google/cloud/automl_v1/__init__.py
+++ b/google/cloud/automl_v1/__init__.py
@@ -104,7 +104,6 @@
__all__ = (
"AnnotationPayload",
"AnnotationSpec",
- "AutoMlClient",
"BatchPredictInputConfig",
"BatchPredictOperationMetadata",
"BatchPredictOutputConfig",
@@ -165,6 +164,7 @@
"OutputConfig",
"PredictRequest",
"PredictResponse",
+ "PredictionServiceClient",
"TextClassificationDatasetMetadata",
"TextClassificationModelMetadata",
"TextExtractionAnnotation",
@@ -185,5 +185,5 @@
"UndeployModelRequest",
"UpdateDatasetRequest",
"UpdateModelRequest",
- "PredictionServiceClient",
+ "AutoMlClient",
)
diff --git a/google/cloud/automl_v1/services/auto_ml/async_client.py b/google/cloud/automl_v1/services/auto_ml/async_client.py
index 23d7b118..254f5cc5 100644
--- a/google/cloud/automl_v1/services/auto_ml/async_client.py
+++ b/google/cloud/automl_v1/services/auto_ml/async_client.py
@@ -79,14 +79,46 @@ class AutoMlAsyncClient:
DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT
+ annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path)
+ parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path)
dataset_path = staticmethod(AutoMlClient.dataset_path)
parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path)
model_path = staticmethod(AutoMlClient.model_path)
parse_model_path = staticmethod(AutoMlClient.parse_model_path)
+ model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path)
+ parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path)
+
+ common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path)
+ parse_common_billing_account_path = staticmethod(
+ AutoMlClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(AutoMlClient.common_folder_path)
+ parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path)
+
+ common_organization_path = staticmethod(AutoMlClient.common_organization_path)
+ parse_common_organization_path = staticmethod(
+ AutoMlClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(AutoMlClient.common_project_path)
+ parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path)
+
+ common_location_path = staticmethod(AutoMlClient.common_location_path)
+ parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path)
from_service_account_file = AutoMlClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> AutoMlTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ AutoMlTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(AutoMlClient).get_transport_class, type(AutoMlClient)
)
@@ -187,7 +219,8 @@ async def create_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, dataset]):
+ has_flattened_params = any([parent, dataset])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -270,7 +303,8 @@ async def get_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -293,7 +327,7 @@ async def get_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -352,7 +386,8 @@ async def list_datasets(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -375,7 +410,7 @@ async def list_datasets(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -446,7 +481,8 @@ async def update_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([dataset, update_mask]):
+ has_flattened_params = any([dataset, update_mask])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -540,7 +576,8 @@ async def delete_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -563,7 +600,7 @@ async def delete_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -660,7 +697,8 @@ async def import_data(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, input_config]):
+ has_flattened_params = any([name, input_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -766,7 +804,8 @@ async def export_data(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, output_config]):
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -845,7 +884,8 @@ async def get_annotation_spec(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -868,7 +908,7 @@ async def get_annotation_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -938,7 +978,8 @@ async def create_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, model]):
+ has_flattened_params = any([parent, model])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1018,7 +1059,8 @@ async def get_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1041,7 +1083,7 @@ async def get_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1100,7 +1142,8 @@ async def list_models(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1123,7 +1166,7 @@ async def list_models(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1203,7 +1246,8 @@ async def delete_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1226,7 +1270,7 @@ async def delete_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1297,7 +1341,8 @@ async def update_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([model, update_mask]):
+ has_flattened_params = any([model, update_mask])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1401,7 +1446,8 @@ async def deploy_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1503,7 +1549,8 @@ async def undeploy_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1613,7 +1660,8 @@ async def export_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, output_config]):
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1692,7 +1740,8 @@ async def get_model_evaluation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1715,7 +1764,7 @@ async def get_model_evaluation(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1795,7 +1844,8 @@ async def list_model_evaluations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, filter]):
+ has_flattened_params = any([parent, filter])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1820,7 +1870,7 @@ async def list_model_evaluations(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
diff --git a/google/cloud/automl_v1/services/auto_ml/client.py b/google/cloud/automl_v1/services/auto_ml/client.py
index 3765ac0b..0b860787 100644
--- a/google/cloud/automl_v1/services/auto_ml/client.py
+++ b/google/cloud/automl_v1/services/auto_ml/client.py
@@ -163,6 +163,36 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> AutoMlTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ AutoMlTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def annotation_spec_path(
+ project: str, location: str, dataset: str, annotation_spec: str,
+ ) -> str:
+ """Return a fully-qualified annotation_spec string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ annotation_spec=annotation_spec,
+ )
+
+ @staticmethod
+ def parse_annotation_spec_path(path: str) -> Dict[str, str]:
+ """Parse a annotation_spec path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/annotationSpecs/(?P<annotation_spec>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Return a fully-qualified dataset string."""
@@ -195,6 +225,86 @@ def parse_model_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def model_evaluation_path(
+ project: str, location: str, model: str, model_evaluation: str,
+ ) -> str:
+ """Return a fully-qualified model_evaluation string."""
+ return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
+ project=project,
+ location=location,
+ model=model,
+ model_evaluation=model_evaluation,
+ )
+
+ @staticmethod
+ def parse_model_evaluation_path(path: str) -> Dict[str, str]:
+ """Parse a model_evaluation path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/modelEvaluations/(?P<model_evaluation>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
def __init__(
self,
*,
@@ -230,10 +340,10 @@ def __init__(
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/base.py b/google/cloud/automl_v1/services/auto_ml/transports/base.py
index b1e12781..7deddee8 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/base.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/base.py
@@ -122,7 +122,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -135,7 +135,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -151,7 +151,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -170,7 +170,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -186,7 +186,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -199,7 +199,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -212,7 +212,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -237,7 +237,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -250,7 +250,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
index b957e5cd..64b34698 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc.py
@@ -111,10 +111,10 @@ def __init__(
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -123,6 +123,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -130,6 +132,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -166,6 +169,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -243,12 +247,8 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Return the channel from cache.
return self._grpc_channel
@property
diff --git a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
index 0778c063..cc343b55 100644
--- a/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py
@@ -168,6 +168,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -175,6 +177,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -211,6 +214,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
diff --git a/google/cloud/automl_v1/services/prediction_service/async_client.py b/google/cloud/automl_v1/services/prediction_service/async_client.py
index d77836a0..7f922fb3 100644
--- a/google/cloud/automl_v1/services/prediction_service/async_client.py
+++ b/google/cloud/automl_v1/services/prediction_service/async_client.py
@@ -53,9 +53,50 @@ class PredictionServiceAsyncClient:
DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
+ model_path = staticmethod(PredictionServiceClient.model_path)
+ parse_model_path = staticmethod(PredictionServiceClient.parse_model_path)
+
+ common_billing_account_path = staticmethod(
+ PredictionServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ PredictionServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ PredictionServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ PredictionServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ PredictionServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(PredictionServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ PredictionServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(PredictionServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ PredictionServiceClient.parse_common_location_path
+ )
+
from_service_account_file = PredictionServiceClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)
)
@@ -225,7 +266,8 @@ async def predict(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, payload, params]):
+ has_flattened_params = any([name, payload, params])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -240,8 +282,9 @@ async def predict(
request.name = name
if payload is not None:
request.payload = payload
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -429,7 +472,8 @@ async def batch_predict(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, input_config, output_config, params]):
+ has_flattened_params = any([name, input_config, output_config, params])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -446,8 +490,9 @@ async def batch_predict(
request.input_config = input_config
if output_config is not None:
request.output_config = output_config
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
diff --git a/google/cloud/automl_v1/services/prediction_service/client.py b/google/cloud/automl_v1/services/prediction_service/client.py
index d2c1971a..a56b5a30 100644
--- a/google/cloud/automl_v1/services/prediction_service/client.py
+++ b/google/cloud/automl_v1/services/prediction_service/client.py
@@ -141,6 +141,90 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
def __init__(
self,
*,
@@ -176,10 +260,10 @@ def __init__(
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -396,8 +480,9 @@ def predict(
request.name = name
if payload is not None:
request.payload = payload
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -604,8 +689,9 @@ def batch_predict(
request.input_config = input_config
if output_config is not None:
request.output_config = output_config
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
index 9a2c8e9b..40833c1e 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc.py
@@ -94,10 +94,10 @@ def __init__(
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -106,6 +106,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -113,6 +115,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -149,6 +152,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -226,12 +230,8 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Return the channel from cache.
return self._grpc_channel
@property
diff --git a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
index eddcb6ab..8f8e0987 100644
--- a/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py
@@ -151,6 +151,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -158,6 +160,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -194,6 +197,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
diff --git a/google/cloud/automl_v1/types/data_items.py b/google/cloud/automl_v1/types/data_items.py
index 51ccc477..554ed762 100644
--- a/google/cloud/automl_v1/types/data_items.py
+++ b/google/cloud/automl_v1/types/data_items.py
@@ -186,12 +186,12 @@ class TextSegmentType(proto.Enum):
input_config = proto.Field(proto.MESSAGE, number=1, message=io.DocumentInputConfig,)
- document_text = proto.Field(proto.MESSAGE, number=2, message=TextSnippet,)
+ document_text = proto.Field(proto.MESSAGE, number=2, message="TextSnippet",)
layout = proto.RepeatedField(proto.MESSAGE, number=3, message=Layout,)
document_dimensions = proto.Field(
- proto.MESSAGE, number=4, message=DocumentDimensions,
+ proto.MESSAGE, number=4, message="DocumentDimensions",
)
page_count = proto.Field(proto.INT32, number=5)
@@ -209,13 +209,15 @@ class ExamplePayload(proto.Message):
Example document.
"""
- image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message=Image,)
+ image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message="Image",)
text_snippet = proto.Field(
- proto.MESSAGE, number=2, oneof="payload", message=TextSnippet,
+ proto.MESSAGE, number=2, oneof="payload", message="TextSnippet",
)
- document = proto.Field(proto.MESSAGE, number=4, oneof="payload", message=Document,)
+ document = proto.Field(
+ proto.MESSAGE, number=4, oneof="payload", message="Document",
+ )
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1/types/detection.py b/google/cloud/automl_v1/types/detection.py
index 6eab690c..69ca1d54 100644
--- a/google/cloud/automl_v1/types/detection.py
+++ b/google/cloud/automl_v1/types/detection.py
@@ -128,7 +128,7 @@ class ImageObjectDetectionEvaluationMetrics(proto.Message):
evaluated_bounding_box_count = proto.Field(proto.INT32, number=1)
bounding_box_metrics_entries = proto.RepeatedField(
- proto.MESSAGE, number=2, message=BoundingBoxMetricsEntry,
+ proto.MESSAGE, number=2, message="BoundingBoxMetricsEntry",
)
bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3)
diff --git a/google/cloud/automl_v1/types/geometry.py b/google/cloud/automl_v1/types/geometry.py
index f459ca52..07c22dd9 100644
--- a/google/cloud/automl_v1/types/geometry.py
+++ b/google/cloud/automl_v1/types/geometry.py
@@ -55,7 +55,7 @@ class BoundingPoly(proto.Message):
"""
normalized_vertices = proto.RepeatedField(
- proto.MESSAGE, number=2, message=NormalizedVertex,
+ proto.MESSAGE, number=2, message="NormalizedVertex",
)
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
index b7a10974..cd2c4388 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/async_client.py
@@ -87,18 +87,50 @@ class AutoMlAsyncClient:
DEFAULT_ENDPOINT = AutoMlClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = AutoMlClient.DEFAULT_MTLS_ENDPOINT
+ annotation_spec_path = staticmethod(AutoMlClient.annotation_spec_path)
+ parse_annotation_spec_path = staticmethod(AutoMlClient.parse_annotation_spec_path)
column_spec_path = staticmethod(AutoMlClient.column_spec_path)
parse_column_spec_path = staticmethod(AutoMlClient.parse_column_spec_path)
dataset_path = staticmethod(AutoMlClient.dataset_path)
parse_dataset_path = staticmethod(AutoMlClient.parse_dataset_path)
model_path = staticmethod(AutoMlClient.model_path)
parse_model_path = staticmethod(AutoMlClient.parse_model_path)
+ model_evaluation_path = staticmethod(AutoMlClient.model_evaluation_path)
+ parse_model_evaluation_path = staticmethod(AutoMlClient.parse_model_evaluation_path)
table_spec_path = staticmethod(AutoMlClient.table_spec_path)
parse_table_spec_path = staticmethod(AutoMlClient.parse_table_spec_path)
+ common_billing_account_path = staticmethod(AutoMlClient.common_billing_account_path)
+ parse_common_billing_account_path = staticmethod(
+ AutoMlClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(AutoMlClient.common_folder_path)
+ parse_common_folder_path = staticmethod(AutoMlClient.parse_common_folder_path)
+
+ common_organization_path = staticmethod(AutoMlClient.common_organization_path)
+ parse_common_organization_path = staticmethod(
+ AutoMlClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(AutoMlClient.common_project_path)
+ parse_common_project_path = staticmethod(AutoMlClient.parse_common_project_path)
+
+ common_location_path = staticmethod(AutoMlClient.common_location_path)
+ parse_common_location_path = staticmethod(AutoMlClient.parse_common_location_path)
+
from_service_account_file = AutoMlClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> AutoMlTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ AutoMlTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(AutoMlClient).get_transport_class, type(AutoMlClient)
)
@@ -196,7 +228,8 @@ async def create_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, dataset]):
+ has_flattened_params = any([parent, dataset])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -271,7 +304,8 @@ async def get_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -294,7 +328,7 @@ async def get_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -353,7 +387,8 @@ async def list_datasets(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -376,7 +411,7 @@ async def list_datasets(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -440,7 +475,8 @@ async def update_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([dataset]):
+ has_flattened_params = any([dataset])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -532,7 +568,8 @@ async def delete_dataset(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -555,7 +592,7 @@ async def delete_dataset(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -652,7 +689,8 @@ async def import_data(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, input_config]):
+ has_flattened_params = any([name, input_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -758,7 +796,8 @@ async def export_data(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, output_config]):
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -837,7 +876,8 @@ async def get_annotation_spec(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -860,7 +900,7 @@ async def get_annotation_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -925,7 +965,8 @@ async def get_table_spec(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -948,7 +989,7 @@ async def get_table_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1007,7 +1048,8 @@ async def list_table_specs(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1030,7 +1072,7 @@ async def list_table_specs(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1101,7 +1143,8 @@ async def update_table_spec(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([table_spec]):
+ has_flattened_params = any([table_spec])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1177,7 +1220,8 @@ async def get_column_spec(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1200,7 +1244,7 @@ async def get_column_spec(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1259,7 +1303,8 @@ async def list_column_specs(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1282,7 +1327,7 @@ async def list_column_specs(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1347,7 +1392,8 @@ async def update_column_spec(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([column_spec]):
+ has_flattened_params = any([column_spec])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1434,7 +1480,8 @@ async def create_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, model]):
+ has_flattened_params = any([parent, model])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1514,7 +1561,8 @@ async def get_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1537,7 +1585,7 @@ async def get_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1596,7 +1644,8 @@ async def list_models(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1619,7 +1668,7 @@ async def list_models(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1699,7 +1748,8 @@ async def delete_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1722,7 +1772,7 @@ async def delete_model(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -1815,7 +1865,8 @@ async def deploy_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -1917,7 +1968,8 @@ async def undeploy_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2028,7 +2080,8 @@ async def export_model(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, output_config]):
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2146,7 +2199,8 @@ async def export_evaluated_examples(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, output_config]):
+ has_flattened_params = any([name, output_config])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2225,7 +2279,8 @@ async def get_model_evaluation(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -2248,7 +2303,7 @@ async def get_model_evaluation(
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -2310,7 +2365,8 @@ async def list_model_evaluations(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/client.py b/google/cloud/automl_v1beta1/services/auto_ml/client.py
index 6ae4f4c2..a930910e 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/client.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/client.py
@@ -171,6 +171,36 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> AutoMlTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ AutoMlTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def annotation_spec_path(
+ project: str, location: str, dataset: str, annotation_spec: str,
+ ) -> str:
+ """Return a fully-qualified annotation_spec string."""
+ return "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ annotation_spec=annotation_spec,
+ )
+
+ @staticmethod
+ def parse_annotation_spec_path(path: str) -> Dict[str, str]:
+ """Parse a annotation_spec path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)/annotationSpecs/(?P<annotation_spec>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def column_spec_path(
project: str, location: str, dataset: str, table_spec: str, column_spec: str,
@@ -225,6 +255,27 @@ def parse_model_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def model_evaluation_path(
+ project: str, location: str, model: str, model_evaluation: str,
+ ) -> str:
+ """Return a fully-qualified model_evaluation string."""
+ return "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
+ project=project,
+ location=location,
+ model=model,
+ model_evaluation=model_evaluation,
+ )
+
+ @staticmethod
+ def parse_model_evaluation_path(path: str) -> Dict[str, str]:
+ """Parse a model_evaluation path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)/modelEvaluations/(?P<model_evaluation>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
@staticmethod
def table_spec_path(
project: str, location: str, dataset: str, table_spec: str,
@@ -243,6 +294,65 @@ def parse_table_spec_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
def __init__(
self,
*,
@@ -278,10 +388,10 @@ def __init__(
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
index 642c9f7b..63f0601f 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/base.py
@@ -125,7 +125,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -138,7 +138,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -154,7 +154,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -173,7 +173,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -186,7 +186,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -199,7 +199,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -215,7 +215,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -228,7 +228,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -247,7 +247,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -260,7 +260,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -273,7 +273,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
@@ -300,7 +300,7 @@ def _prep_wrapped_messages(self, client_info):
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
- exceptions.ServiceUnavailable, exceptions.DeadlineExceeded,
+ exceptions.DeadlineExceeded, exceptions.ServiceUnavailable,
),
),
default_timeout=5.0,
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
index a8cb2fd7..eab479d8 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py
@@ -114,10 +114,10 @@ def __init__(
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -126,6 +126,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -133,6 +135,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -169,6 +172,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -246,12 +250,8 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Return the channel from cache.
return self._grpc_channel
@property
diff --git a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
index a977ad45..ca80586e 100644
--- a/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py
@@ -171,6 +171,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -178,6 +180,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -214,6 +217,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
index c204325b..b1eadd8e 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/async_client.py
@@ -53,9 +53,50 @@ class PredictionServiceAsyncClient:
DEFAULT_ENDPOINT = PredictionServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = PredictionServiceClient.DEFAULT_MTLS_ENDPOINT
+ model_path = staticmethod(PredictionServiceClient.model_path)
+ parse_model_path = staticmethod(PredictionServiceClient.parse_model_path)
+
+ common_billing_account_path = staticmethod(
+ PredictionServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ PredictionServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(PredictionServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ PredictionServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ PredictionServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ PredictionServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(PredictionServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ PredictionServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(PredictionServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ PredictionServiceClient.parse_common_location_path
+ )
+
from_service_account_file = PredictionServiceClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(PredictionServiceClient).get_transport_class, type(PredictionServiceClient)
)
@@ -201,7 +242,8 @@ async def predict(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, payload, params]):
+ has_flattened_params = any([name, payload, params])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -216,8 +258,9 @@ async def predict(
request.name = name
if payload is not None:
request.payload = payload
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -401,7 +444,8 @@ async def batch_predict(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, input_config, output_config, params]):
+ has_flattened_params = any([name, input_config, output_config, params])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -418,8 +462,9 @@ async def batch_predict(
request.input_config = input_config
if output_config is not None:
request.output_config = output_config
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/client.py b/google/cloud/automl_v1beta1/services/prediction_service/client.py
index 78ec510c..7508e83a 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/client.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/client.py
@@ -141,6 +141,90 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> PredictionServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ PredictionServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def model_path(project: str, location: str, model: str,) -> str:
+ """Return a fully-qualified model string."""
+ return "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+
+ @staticmethod
+ def parse_model_path(path: str) -> Dict[str, str]:
+ """Parse a model path into its component segments."""
+ m = re.match(
+ r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
def __init__(
self,
*,
@@ -176,10 +260,10 @@ def __init__(
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -372,8 +456,9 @@ def predict(
request.name = name
if payload is not None:
request.payload = payload
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
@@ -576,8 +661,9 @@ def batch_predict(
request.input_config = input_config
if output_config is not None:
request.output_config = output_config
- if params is not None:
- request.params = params
+
+ if params:
+ request.params.update(params)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
index 3c484247..8b410eae 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py
@@ -94,10 +94,10 @@ def __init__(
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
- client_info (google.api_core.gapic_v1.client_info.ClientInfo):
- The client info used to send a user-agent string along with
- API requests. If ``None``, then default info will be used.
- Generally, you only need to set this if you're developing
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
your own client library.
Raises:
@@ -106,6 +106,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -113,6 +115,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -149,6 +152,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
@@ -226,12 +230,8 @@ def create_channel(
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Return the channel from cache.
return self._grpc_channel
@property
diff --git a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
index 0b1bb638..7f110733 100644
--- a/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
+++ b/google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py
@@ -151,6 +151,8 @@ def __init__(
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -158,6 +160,7 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
@@ -194,6 +197,7 @@ def __init__(
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
+ self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
diff --git a/google/cloud/automl_v1beta1/types/classification.py b/google/cloud/automl_v1beta1/types/classification.py
index 4b5e5a2a..b9f21ba3 100644
--- a/google/cloud/automl_v1beta1/types/classification.py
+++ b/google/cloud/automl_v1beta1/types/classification.py
@@ -99,7 +99,7 @@ class VideoClassificationAnnotation(proto.Message):
type_ = proto.Field(proto.STRING, number=1)
classification_annotation = proto.Field(
- proto.MESSAGE, number=2, message=ClassificationAnnotation,
+ proto.MESSAGE, number=2, message="ClassificationAnnotation",
)
time_segment = proto.Field(proto.MESSAGE, number=3, message=temporal.TimeSegment,)
diff --git a/google/cloud/automl_v1beta1/types/data_items.py b/google/cloud/automl_v1beta1/types/data_items.py
index 4e0037b0..eff58bee 100644
--- a/google/cloud/automl_v1beta1/types/data_items.py
+++ b/google/cloud/automl_v1beta1/types/data_items.py
@@ -195,12 +195,12 @@ class TextSegmentType(proto.Enum):
input_config = proto.Field(proto.MESSAGE, number=1, message=io.DocumentInputConfig,)
- document_text = proto.Field(proto.MESSAGE, number=2, message=TextSnippet,)
+ document_text = proto.Field(proto.MESSAGE, number=2, message="TextSnippet",)
layout = proto.RepeatedField(proto.MESSAGE, number=3, message=Layout,)
document_dimensions = proto.Field(
- proto.MESSAGE, number=4, message=DocumentDimensions,
+ proto.MESSAGE, number=4, message="DocumentDimensions",
)
page_count = proto.Field(proto.INT32, number=5)
@@ -247,15 +247,17 @@ class ExamplePayload(proto.Message):
Example relational table row.
"""
- image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message=Image,)
+ image = proto.Field(proto.MESSAGE, number=1, oneof="payload", message="Image",)
text_snippet = proto.Field(
- proto.MESSAGE, number=2, oneof="payload", message=TextSnippet,
+ proto.MESSAGE, number=2, oneof="payload", message="TextSnippet",
)
- document = proto.Field(proto.MESSAGE, number=4, oneof="payload", message=Document,)
+ document = proto.Field(
+ proto.MESSAGE, number=4, oneof="payload", message="Document",
+ )
- row = proto.Field(proto.MESSAGE, number=3, oneof="payload", message=Row,)
+ row = proto.Field(proto.MESSAGE, number=3, oneof="payload", message="Row",)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/data_stats.py b/google/cloud/automl_v1beta1/types/data_stats.py
index 4405185f..75a9dc38 100644
--- a/google/cloud/automl_v1beta1/types/data_stats.py
+++ b/google/cloud/automl_v1beta1/types/data_stats.py
@@ -210,7 +210,7 @@ class ArrayStats(proto.Message):
depends on the element type of the array.
"""
- member_stats = proto.Field(proto.MESSAGE, number=2, message=DataStats,)
+ member_stats = proto.Field(proto.MESSAGE, number=2, message="DataStats",)
class StructStats(proto.Message):
@@ -224,7 +224,7 @@ class StructStats(proto.Message):
"""
field_stats = proto.MapField(
- proto.STRING, proto.MESSAGE, number=1, message=DataStats,
+ proto.STRING, proto.MESSAGE, number=1, message="DataStats",
)
diff --git a/google/cloud/automl_v1beta1/types/data_types.py b/google/cloud/automl_v1beta1/types/data_types.py
index e2a3152e..6faa598b 100644
--- a/google/cloud/automl_v1beta1/types/data_types.py
+++ b/google/cloud/automl_v1beta1/types/data_types.py
@@ -105,7 +105,7 @@ class StructType(proto.Message):
mutable.
"""
- fields = proto.MapField(proto.STRING, proto.MESSAGE, number=1, message=DataType,)
+ fields = proto.MapField(proto.STRING, proto.MESSAGE, number=1, message="DataType",)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/automl_v1beta1/types/detection.py b/google/cloud/automl_v1beta1/types/detection.py
index 3f3339ab..c14a1cb5 100644
--- a/google/cloud/automl_v1beta1/types/detection.py
+++ b/google/cloud/automl_v1beta1/types/detection.py
@@ -171,7 +171,7 @@ class ImageObjectDetectionEvaluationMetrics(proto.Message):
evaluated_bounding_box_count = proto.Field(proto.INT32, number=1)
bounding_box_metrics_entries = proto.RepeatedField(
- proto.MESSAGE, number=2, message=BoundingBoxMetricsEntry,
+ proto.MESSAGE, number=2, message="BoundingBoxMetricsEntry",
)
bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=3)
@@ -208,7 +208,7 @@ class VideoObjectTrackingEvaluationMetrics(proto.Message):
evaluated_bounding_box_count = proto.Field(proto.INT32, number=2)
bounding_box_metrics_entries = proto.RepeatedField(
- proto.MESSAGE, number=4, message=BoundingBoxMetricsEntry,
+ proto.MESSAGE, number=4, message="BoundingBoxMetricsEntry",
)
bounding_box_mean_average_precision = proto.Field(proto.FLOAT, number=6)
diff --git a/google/cloud/automl_v1beta1/types/geometry.py b/google/cloud/automl_v1beta1/types/geometry.py
index 7a463691..f64a477f 100644
--- a/google/cloud/automl_v1beta1/types/geometry.py
+++ b/google/cloud/automl_v1beta1/types/geometry.py
@@ -56,7 +56,7 @@ class BoundingPoly(proto.Message):
"""
normalized_vertices = proto.RepeatedField(
- proto.MESSAGE, number=2, message=NormalizedVertex,
+ proto.MESSAGE, number=2, message="NormalizedVertex",
)
diff --git a/noxfile.py b/noxfile.py
index 709afdde..f451c21c 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -28,7 +28,7 @@
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
@nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -72,7 +72,9 @@ def default(session):
# Install all test dependencies, then install this package in-place.
session.install("asyncmock", "pytest-asyncio")
- session.install("mock", "pytest", "pytest-cov")
+ session.install(
+ "mock", "pytest", "pytest-cov",
+ )
session.install("-e", ".[pandas,storage]")
# Run py.test against the unit tests.
@@ -172,7 +174,9 @@ def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".[pandas,storage]")
- session.install("sphinx<3.0.0", "alabaster", "recommonmark", "sphinx-docfx-yaml")
+ # sphinx-docfx-yaml supports up to sphinx version 1.5.5.
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97
+ session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
diff --git a/samples/beta/noxfile.py b/samples/beta/noxfile.py
index ba55d7ce..b90eef00 100644
--- a/samples/beta/noxfile.py
+++ b/samples/beta/noxfile.py
@@ -39,6 +39,10 @@
# You can opt out from the test for specific Python versions.
'ignored_versions': ["2.7"],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ 'enforce_type_hints': False,
+
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
@@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir):
@nox.session
def lint(session):
- session.install("flake8", "flake8-import-order")
+ if not TEST_CONFIG['enforce_type_hints']:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
@@ -141,8 +148,18 @@ def lint(session):
"."
]
session.run("flake8", *args)
+#
+# Black
+#
+@nox.session
+def blacken(session):
+ session.install("black")
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
#
# Sample Tests
#
@@ -201,6 +218,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py
index ba55d7ce..b90eef00 100644
--- a/samples/snippets/noxfile.py
+++ b/samples/snippets/noxfile.py
@@ -39,6 +39,10 @@
# You can opt out from the test for specific Python versions.
'ignored_versions': ["2.7"],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ 'enforce_type_hints': False,
+
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
@@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir):
@nox.session
def lint(session):
- session.install("flake8", "flake8-import-order")
+ if not TEST_CONFIG['enforce_type_hints']:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
@@ -141,8 +148,18 @@ def lint(session):
"."
]
session.run("flake8", *args)
+#
+# Black
+#
+@nox.session
+def blacken(session):
+ session.install("black")
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
#
# Sample Tests
#
@@ -201,6 +218,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
diff --git a/samples/tables/noxfile.py b/samples/tables/noxfile.py
index ba55d7ce..b90eef00 100644
--- a/samples/tables/noxfile.py
+++ b/samples/tables/noxfile.py
@@ -39,6 +39,10 @@
# You can opt out from the test for specific Python versions.
'ignored_versions': ["2.7"],
+ # Old samples are opted out of enforcing Python type hints
+ # All new samples should feature them
+ 'enforce_type_hints': False,
+
# An envvar key for determining the project id to use. Change it
# to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a
# build specific Cloud project. You can also use your own string
@@ -132,7 +136,10 @@ def _determine_local_import_names(start_dir):
@nox.session
def lint(session):
- session.install("flake8", "flake8-import-order")
+ if not TEST_CONFIG['enforce_type_hints']:
+ session.install("flake8", "flake8-import-order")
+ else:
+ session.install("flake8", "flake8-import-order", "flake8-annotations")
local_names = _determine_local_import_names(".")
args = FLAKE8_COMMON_ARGS + [
@@ -141,8 +148,18 @@ def lint(session):
"."
]
session.run("flake8", *args)
+#
+# Black
+#
+@nox.session
+def blacken(session):
+ session.install("black")
+ python_files = [path for path in os.listdir(".") if path.endswith(".py")]
+
+ session.run("black", *python_files)
+
#
# Sample Tests
#
@@ -201,6 +218,11 @@ def _get_repo_root():
break
if Path(p / ".git").exists():
return str(p)
+ # .git is not available in repos cloned via Cloud Build
+ # setup.py is always in the library's root, so use that instead
+ # https://github.com/googleapis/synthtool/issues/792
+ if Path(p / "setup.py").exists():
+ return str(p)
p = p.parent
raise Exception("Unable to detect repository root.")
diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh
index ff599eb2..21f6d2a2 100755
--- a/scripts/decrypt-secrets.sh
+++ b/scripts/decrypt-secrets.sh
@@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" )
# Work from the project root.
cd $ROOT
+# Prevent it from overriding files.
+# We recommend that sample authors use their own service account files and cloud project.
+# In that case, they are supposed to prepare these files by themselves.
+if [[ -f "testing/test-env.sh" ]] || \
+ [[ -f "testing/service-account.json" ]] || \
+ [[ -f "testing/client-secrets.json" ]]; then
+ echo "One or more target files exist, aborting."
+ exit 1
+fi
+
# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources.
PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}"
gcloud secrets versions access latest --secret="python-docs-samples-test-env" \
+ --project="${PROJECT_ID}" \
> testing/test-env.sh
gcloud secrets versions access latest \
--secret="python-docs-samples-service-account" \
+ --project="${PROJECT_ID}" \
> testing/service-account.json
gcloud secrets versions access latest \
--secret="python-docs-samples-client-secrets" \
- > testing/client-secrets.json
\ No newline at end of file
+ --project="${PROJECT_ID}" \
+ > testing/client-secrets.json
diff --git a/scripts/fixup_automl_v1_keywords.py b/scripts/fixup_automl_v1_keywords.py
index 85b5fc68..9051bf99 100644
--- a/scripts/fixup_automl_v1_keywords.py
+++ b/scripts/fixup_automl_v1_keywords.py
@@ -1,3 +1,4 @@
+#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
diff --git a/scripts/fixup_automl_v1beta1_keywords.py b/scripts/fixup_automl_v1beta1_keywords.py
index 1644607f..8a34eafd 100644
--- a/scripts/fixup_automl_v1beta1_keywords.py
+++ b/scripts/fixup_automl_v1beta1_keywords.py
@@ -1,3 +1,4 @@
+#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
diff --git a/synth.metadata b/synth.metadata
index fe27c523..5cc47549 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -3,30 +3,30 @@
{
"git": {
"name": ".",
- "remote": "https://github.com/googleapis/python-automl.git",
- "sha": "8c7d54872a6e5628171f160e1a39a067d5f46563"
+ "remote": "git@github.com:googleapis/python-automl",
+ "sha": "4cfccc2126bef3cc5ab4a324020bfa4c0fe48318"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "20b11dfe4538cd5da7b4c3dd7d2bf5b9922ff3ed",
- "internalRef": "338646463"
+ "sha": "6a69c750c3f01a69017662395f90515bbf1fe1ff",
+ "internalRef": "342721036"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "9602086c6c5b05db77950c7f7495a2a3868f3537"
+ "sha": "d5fc0bcf9ea9789c5b0e3154a9e3b29e5cea6116"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "9602086c6c5b05db77950c7f7495a2a3868f3537"
+ "sha": "d5fc0bcf9ea9789c5b0e3154a9e3b29e5cea6116"
}
}
],
@@ -49,221 +49,5 @@
"generator": "bazel"
}
}
- ],
- "generatedFiles": [
- ".coveragerc",
- ".flake8",
- ".github/CONTRIBUTING.md",
- ".github/ISSUE_TEMPLATE/bug_report.md",
- ".github/ISSUE_TEMPLATE/feature_request.md",
- ".github/ISSUE_TEMPLATE/support_request.md",
- ".github/PULL_REQUEST_TEMPLATE.md",
- ".github/release-please.yml",
- ".gitignore",
- ".kokoro/build.sh",
- ".kokoro/continuous/common.cfg",
- ".kokoro/continuous/continuous.cfg",
- ".kokoro/docker/docs/Dockerfile",
- ".kokoro/docker/docs/fetch_gpg_keys.sh",
- ".kokoro/docs/common.cfg",
- ".kokoro/docs/docs-presubmit.cfg",
- ".kokoro/docs/docs.cfg",
- ".kokoro/presubmit/common.cfg",
- ".kokoro/presubmit/presubmit.cfg",
- ".kokoro/publish-docs.sh",
- ".kokoro/release.sh",
- ".kokoro/release/common.cfg",
- ".kokoro/release/release.cfg",
- ".kokoro/samples/lint/common.cfg",
- ".kokoro/samples/lint/continuous.cfg",
- ".kokoro/samples/lint/periodic.cfg",
- ".kokoro/samples/lint/presubmit.cfg",
- ".kokoro/samples/python3.6/common.cfg",
- ".kokoro/samples/python3.6/continuous.cfg",
- ".kokoro/samples/python3.6/periodic.cfg",
- ".kokoro/samples/python3.6/presubmit.cfg",
- ".kokoro/samples/python3.7/common.cfg",
- ".kokoro/samples/python3.7/continuous.cfg",
- ".kokoro/samples/python3.7/periodic.cfg",
- ".kokoro/samples/python3.7/presubmit.cfg",
- ".kokoro/samples/python3.8/common.cfg",
- ".kokoro/samples/python3.8/continuous.cfg",
- ".kokoro/samples/python3.8/periodic.cfg",
- ".kokoro/samples/python3.8/presubmit.cfg",
- ".kokoro/test-samples.sh",
- ".kokoro/trampoline.sh",
- ".kokoro/trampoline_v2.sh",
- ".trampolinerc",
- "CODE_OF_CONDUCT.md",
- "CONTRIBUTING.rst",
- "LICENSE",
- "MANIFEST.in",
- "docs/_static/custom.css",
- "docs/_templates/layout.html",
- "docs/automl_v1/services.rst",
- "docs/automl_v1/types.rst",
- "docs/automl_v1beta1/services.rst",
- "docs/automl_v1beta1/types.rst",
- "docs/conf.py",
- "docs/multiprocessing.rst",
- "google/cloud/automl/__init__.py",
- "google/cloud/automl/py.typed",
- "google/cloud/automl_v1/__init__.py",
- "google/cloud/automl_v1/proto/annotation_payload.proto",
- "google/cloud/automl_v1/proto/annotation_spec.proto",
- "google/cloud/automl_v1/proto/classification.proto",
- "google/cloud/automl_v1/proto/data_items.proto",
- "google/cloud/automl_v1/proto/dataset.proto",
- "google/cloud/automl_v1/proto/detection.proto",
- "google/cloud/automl_v1/proto/geometry.proto",
- "google/cloud/automl_v1/proto/image.proto",
- "google/cloud/automl_v1/proto/io.proto",
- "google/cloud/automl_v1/proto/model.proto",
- "google/cloud/automl_v1/proto/model_evaluation.proto",
- "google/cloud/automl_v1/proto/operations.proto",
- "google/cloud/automl_v1/proto/prediction_service.proto",
- "google/cloud/automl_v1/proto/service.proto",
- "google/cloud/automl_v1/proto/text.proto",
- "google/cloud/automl_v1/proto/text_extraction.proto",
- "google/cloud/automl_v1/proto/text_segment.proto",
- "google/cloud/automl_v1/proto/text_sentiment.proto",
- "google/cloud/automl_v1/proto/translation.proto",
- "google/cloud/automl_v1/py.typed",
- "google/cloud/automl_v1/services/__init__.py",
- "google/cloud/automl_v1/services/auto_ml/__init__.py",
- "google/cloud/automl_v1/services/auto_ml/async_client.py",
- "google/cloud/automl_v1/services/auto_ml/client.py",
- "google/cloud/automl_v1/services/auto_ml/pagers.py",
- "google/cloud/automl_v1/services/auto_ml/transports/__init__.py",
- "google/cloud/automl_v1/services/auto_ml/transports/base.py",
- "google/cloud/automl_v1/services/auto_ml/transports/grpc.py",
- "google/cloud/automl_v1/services/auto_ml/transports/grpc_asyncio.py",
- "google/cloud/automl_v1/services/prediction_service/__init__.py",
- "google/cloud/automl_v1/services/prediction_service/async_client.py",
- "google/cloud/automl_v1/services/prediction_service/client.py",
- "google/cloud/automl_v1/services/prediction_service/transports/__init__.py",
- "google/cloud/automl_v1/services/prediction_service/transports/base.py",
- "google/cloud/automl_v1/services/prediction_service/transports/grpc.py",
- "google/cloud/automl_v1/services/prediction_service/transports/grpc_asyncio.py",
- "google/cloud/automl_v1/types/__init__.py",
- "google/cloud/automl_v1/types/annotation_payload.py",
- "google/cloud/automl_v1/types/annotation_spec.py",
- "google/cloud/automl_v1/types/classification.py",
- "google/cloud/automl_v1/types/data_items.py",
- "google/cloud/automl_v1/types/dataset.py",
- "google/cloud/automl_v1/types/detection.py",
- "google/cloud/automl_v1/types/geometry.py",
- "google/cloud/automl_v1/types/image.py",
- "google/cloud/automl_v1/types/io.py",
- "google/cloud/automl_v1/types/model.py",
- "google/cloud/automl_v1/types/model_evaluation.py",
- "google/cloud/automl_v1/types/operations.py",
- "google/cloud/automl_v1/types/prediction_service.py",
- "google/cloud/automl_v1/types/service.py",
- "google/cloud/automl_v1/types/text.py",
- "google/cloud/automl_v1/types/text_extraction.py",
- "google/cloud/automl_v1/types/text_segment.py",
- "google/cloud/automl_v1/types/text_sentiment.py",
- "google/cloud/automl_v1/types/translation.py",
- "google/cloud/automl_v1beta1/__init__.py",
- "google/cloud/automl_v1beta1/proto/annotation_payload.proto",
- "google/cloud/automl_v1beta1/proto/annotation_spec.proto",
- "google/cloud/automl_v1beta1/proto/classification.proto",
- "google/cloud/automl_v1beta1/proto/column_spec.proto",
- "google/cloud/automl_v1beta1/proto/data_items.proto",
- "google/cloud/automl_v1beta1/proto/data_stats.proto",
- "google/cloud/automl_v1beta1/proto/data_types.proto",
- "google/cloud/automl_v1beta1/proto/dataset.proto",
- "google/cloud/automl_v1beta1/proto/detection.proto",
- "google/cloud/automl_v1beta1/proto/geometry.proto",
- "google/cloud/automl_v1beta1/proto/image.proto",
- "google/cloud/automl_v1beta1/proto/io.proto",
- "google/cloud/automl_v1beta1/proto/model.proto",
- "google/cloud/automl_v1beta1/proto/model_evaluation.proto",
- "google/cloud/automl_v1beta1/proto/operations.proto",
- "google/cloud/automl_v1beta1/proto/prediction_service.proto",
- "google/cloud/automl_v1beta1/proto/ranges.proto",
- "google/cloud/automl_v1beta1/proto/regression.proto",
- "google/cloud/automl_v1beta1/proto/service.proto",
- "google/cloud/automl_v1beta1/proto/table_spec.proto",
- "google/cloud/automl_v1beta1/proto/tables.proto",
- "google/cloud/automl_v1beta1/proto/temporal.proto",
- "google/cloud/automl_v1beta1/proto/text.proto",
- "google/cloud/automl_v1beta1/proto/text_extraction.proto",
- "google/cloud/automl_v1beta1/proto/text_segment.proto",
- "google/cloud/automl_v1beta1/proto/text_sentiment.proto",
- "google/cloud/automl_v1beta1/proto/translation.proto",
- "google/cloud/automl_v1beta1/proto/video.proto",
- "google/cloud/automl_v1beta1/py.typed",
- "google/cloud/automl_v1beta1/services/__init__.py",
- "google/cloud/automl_v1beta1/services/auto_ml/__init__.py",
- "google/cloud/automl_v1beta1/services/auto_ml/async_client.py",
- "google/cloud/automl_v1beta1/services/auto_ml/client.py",
- "google/cloud/automl_v1beta1/services/auto_ml/pagers.py",
- "google/cloud/automl_v1beta1/services/auto_ml/transports/__init__.py",
- "google/cloud/automl_v1beta1/services/auto_ml/transports/base.py",
- "google/cloud/automl_v1beta1/services/auto_ml/transports/grpc.py",
- "google/cloud/automl_v1beta1/services/auto_ml/transports/grpc_asyncio.py",
- "google/cloud/automl_v1beta1/services/prediction_service/__init__.py",
- "google/cloud/automl_v1beta1/services/prediction_service/async_client.py",
- "google/cloud/automl_v1beta1/services/prediction_service/client.py",
- "google/cloud/automl_v1beta1/services/prediction_service/transports/__init__.py",
- "google/cloud/automl_v1beta1/services/prediction_service/transports/base.py",
- "google/cloud/automl_v1beta1/services/prediction_service/transports/grpc.py",
- "google/cloud/automl_v1beta1/services/prediction_service/transports/grpc_asyncio.py",
- "google/cloud/automl_v1beta1/types/__init__.py",
- "google/cloud/automl_v1beta1/types/annotation_payload.py",
- "google/cloud/automl_v1beta1/types/annotation_spec.py",
- "google/cloud/automl_v1beta1/types/classification.py",
- "google/cloud/automl_v1beta1/types/column_spec.py",
- "google/cloud/automl_v1beta1/types/data_items.py",
- "google/cloud/automl_v1beta1/types/data_stats.py",
- "google/cloud/automl_v1beta1/types/data_types.py",
- "google/cloud/automl_v1beta1/types/dataset.py",
- "google/cloud/automl_v1beta1/types/detection.py",
- "google/cloud/automl_v1beta1/types/geometry.py",
- "google/cloud/automl_v1beta1/types/image.py",
- "google/cloud/automl_v1beta1/types/io.py",
- "google/cloud/automl_v1beta1/types/model.py",
- "google/cloud/automl_v1beta1/types/model_evaluation.py",
- "google/cloud/automl_v1beta1/types/operations.py",
- "google/cloud/automl_v1beta1/types/prediction_service.py",
- "google/cloud/automl_v1beta1/types/ranges.py",
- "google/cloud/automl_v1beta1/types/regression.py",
- "google/cloud/automl_v1beta1/types/service.py",
- "google/cloud/automl_v1beta1/types/table_spec.py",
- "google/cloud/automl_v1beta1/types/tables.py",
- "google/cloud/automl_v1beta1/types/temporal.py",
- "google/cloud/automl_v1beta1/types/text.py",
- "google/cloud/automl_v1beta1/types/text_extraction.py",
- "google/cloud/automl_v1beta1/types/text_segment.py",
- "google/cloud/automl_v1beta1/types/text_sentiment.py",
- "google/cloud/automl_v1beta1/types/translation.py",
- "google/cloud/automl_v1beta1/types/video.py",
- "mypy.ini",
- "noxfile.py",
- "renovate.json",
- "samples/AUTHORING_GUIDE.md",
- "samples/CONTRIBUTING.md",
- "samples/beta/noxfile.py",
- "samples/snippets/noxfile.py",
- "samples/tables/noxfile.py",
- "scripts/decrypt-secrets.sh",
- "scripts/fixup_automl_v1_keywords.py",
- "scripts/fixup_automl_v1beta1_keywords.py",
- "scripts/readme-gen/readme_gen.py",
- "scripts/readme-gen/templates/README.tmpl.rst",
- "scripts/readme-gen/templates/auth.tmpl.rst",
- "scripts/readme-gen/templates/auth_api_key.tmpl.rst",
- "scripts/readme-gen/templates/install_deps.tmpl.rst",
- "scripts/readme-gen/templates/install_portaudio.tmpl.rst",
- "setup.cfg",
- "testing/.gitignore",
- "tests/unit/gapic/automl_v1/__init__.py",
- "tests/unit/gapic/automl_v1/test_auto_ml.py",
- "tests/unit/gapic/automl_v1/test_prediction_service.py",
- "tests/unit/gapic/automl_v1beta1/__init__.py",
- "tests/unit/gapic/automl_v1beta1/test_auto_ml.py",
- "tests/unit/gapic/automl_v1beta1/test_prediction_service.py"
]
}
\ No newline at end of file
diff --git a/tests/unit/gapic/automl_v1/test_auto_ml.py b/tests/unit/gapic/automl_v1/test_auto_ml.py
index a11480cd..d7fb0dcb 100644
--- a/tests/unit/gapic/automl_v1/test_auto_ml.py
+++ b/tests/unit/gapic/automl_v1/test_auto_ml.py
@@ -107,12 +107,12 @@ def test_auto_ml_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_auto_ml_client_get_transport_class():
@@ -438,7 +438,7 @@ def test_create_dataset(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -459,19 +459,19 @@ def test_create_dataset_from_dict():
@pytest.mark.asyncio
-async def test_create_dataset_async(transport: str = "grpc_asyncio"):
+async def test_create_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.CreateDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -483,12 +483,17 @@ async def test_create_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_create_dataset_async_from_dict():
+ await test_create_dataset_async(request_type=dict)
+
+
def test_create_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -498,7 +503,7 @@ def test_create_dataset_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_dataset(request)
@@ -523,9 +528,7 @@ async def test_create_dataset_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -546,7 +549,7 @@ def test_create_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -597,9 +600,7 @@ async def test_create_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -659,7 +660,7 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset(
name="name_value",
@@ -681,6 +682,7 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
@@ -699,19 +701,19 @@ def test_get_dataset_from_dict():
@pytest.mark.asyncio
-async def test_get_dataset_async(transport: str = "grpc_asyncio"):
+async def test_get_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.Dataset(
@@ -729,7 +731,7 @@ async def test_get_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
@@ -745,6 +747,11 @@ async def test_get_dataset_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_get_dataset_async_from_dict():
+ await test_get_dataset_async(request_type=dict)
+
+
def test_get_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -754,7 +761,7 @@ def test_get_dataset_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
client.get_dataset(request)
@@ -779,9 +786,7 @@ async def test_get_dataset_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
await client.get_dataset(request)
@@ -800,7 +805,7 @@ def test_get_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
@@ -832,9 +837,7 @@ async def test_get_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
@@ -875,7 +878,7 @@ def test_list_datasets(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse(
next_page_token="next_page_token_value",
@@ -890,6 +893,7 @@ def test_list_datasets(
assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListDatasetsPager)
assert response.next_page_token == "next_page_token_value"
@@ -900,19 +904,19 @@ def test_list_datasets_from_dict():
@pytest.mark.asyncio
-async def test_list_datasets_async(transport: str = "grpc_asyncio"):
+async def test_list_datasets_async(
+ transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListDatasetsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_datasets), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse(next_page_token="next_page_token_value",)
@@ -924,7 +928,7 @@ async def test_list_datasets_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
@@ -932,6 +936,11 @@ async def test_list_datasets_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_datasets_async_from_dict():
+ await test_list_datasets_async(request_type=dict)
+
+
def test_list_datasets_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -941,7 +950,7 @@ def test_list_datasets_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = service.ListDatasetsResponse()
client.list_datasets(request)
@@ -966,9 +975,7 @@ async def test_list_datasets_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_datasets), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse()
)
@@ -989,7 +996,7 @@ def test_list_datasets_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse()
@@ -1021,9 +1028,7 @@ async def test_list_datasets_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_datasets), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse()
@@ -1058,7 +1063,7 @@ def test_list_datasets_pager():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListDatasetsResponse(
@@ -1092,7 +1097,7 @@ def test_list_datasets_pages():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListDatasetsResponse(
@@ -1119,9 +1124,7 @@ async def test_list_datasets_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_datasets),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1154,9 +1157,7 @@ async def test_list_datasets_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_datasets),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1192,7 +1193,7 @@ def test_update_dataset(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset(
name="name_value",
@@ -1214,6 +1215,7 @@ def test_update_dataset(
assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
@@ -1232,19 +1234,19 @@ def test_update_dataset_from_dict():
@pytest.mark.asyncio
-async def test_update_dataset_async(transport: str = "grpc_asyncio"):
+async def test_update_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UpdateDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_dataset.Dataset(
@@ -1262,7 +1264,7 @@ async def test_update_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
@@ -1278,6 +1280,11 @@ async def test_update_dataset_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_update_dataset_async_from_dict():
+ await test_update_dataset_async(request_type=dict)
+
+
def test_update_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1287,7 +1294,7 @@ def test_update_dataset_field_headers():
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
client.update_dataset(request)
@@ -1314,9 +1321,7 @@ async def test_update_dataset_field_headers_async():
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
await client.update_dataset(request)
@@ -1337,7 +1342,7 @@ def test_update_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
@@ -1388,9 +1393,7 @@ async def test_update_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
@@ -1450,7 +1453,7 @@ def test_delete_dataset(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -1471,19 +1474,19 @@ def test_delete_dataset_from_dict():
@pytest.mark.asyncio
-async def test_delete_dataset_async(transport: str = "grpc_asyncio"):
+async def test_delete_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.DeleteDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -1495,12 +1498,17 @@ async def test_delete_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_delete_dataset_async_from_dict():
+ await test_delete_dataset_async(request_type=dict)
+
+
def test_delete_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1510,7 +1518,7 @@ def test_delete_dataset_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_dataset(request)
@@ -1535,9 +1543,7 @@ async def test_delete_dataset_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -1558,7 +1564,7 @@ def test_delete_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1590,9 +1596,7 @@ async def test_delete_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1633,7 +1637,7 @@ def test_import_data(transport: str = "grpc", request_type=service.ImportDataReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -1654,19 +1658,19 @@ def test_import_data_from_dict():
@pytest.mark.asyncio
-async def test_import_data_async(transport: str = "grpc_asyncio"):
+async def test_import_data_async(
+ transport: str = "grpc_asyncio", request_type=service.ImportDataRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ImportDataRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.import_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -1678,12 +1682,17 @@ async def test_import_data_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_import_data_async_from_dict():
+ await test_import_data_async(request_type=dict)
+
+
def test_import_data_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1693,7 +1702,7 @@ def test_import_data_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.import_data(request)
@@ -1718,9 +1727,7 @@ async def test_import_data_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.import_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -1741,7 +1748,7 @@ def test_import_data_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1786,9 +1793,7 @@ async def test_import_data_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.import_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1842,7 +1847,7 @@ def test_export_data(transport: str = "grpc", request_type=service.ExportDataReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -1863,19 +1868,19 @@ def test_export_data_from_dict():
@pytest.mark.asyncio
-async def test_export_data_async(transport: str = "grpc_asyncio"):
+async def test_export_data_async(
+ transport: str = "grpc_asyncio", request_type=service.ExportDataRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ExportDataRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -1887,12 +1892,17 @@ async def test_export_data_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_export_data_async_from_dict():
+ await test_export_data_async(request_type=dict)
+
+
def test_export_data_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1902,7 +1912,7 @@ def test_export_data_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_data(request)
@@ -1927,9 +1937,7 @@ async def test_export_data_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -1950,7 +1958,7 @@ def test_export_data_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -2001,9 +2009,7 @@ async def test_export_data_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -2066,7 +2072,7 @@ def test_get_annotation_spec(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec(
@@ -2082,6 +2088,7 @@ def test_get_annotation_spec(
assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, annotation_spec.AnnotationSpec)
assert response.name == "name_value"
@@ -2096,18 +2103,20 @@ def test_get_annotation_spec_from_dict():
@pytest.mark.asyncio
-async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"):
+async def test_get_annotation_spec_async(
+ transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetAnnotationSpecRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2124,7 +2133,7 @@ async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
@@ -2136,6 +2145,11 @@ async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"):
assert response.example_count == 1396
+@pytest.mark.asyncio
+async def test_get_annotation_spec_async_from_dict():
+ await test_get_annotation_spec_async(request_type=dict)
+
+
def test_get_annotation_spec_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2146,7 +2160,7 @@ def test_get_annotation_spec_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = annotation_spec.AnnotationSpec()
@@ -2173,7 +2187,7 @@ async def test_get_annotation_spec_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
@@ -2196,7 +2210,7 @@ def test_get_annotation_spec_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
@@ -2230,7 +2244,7 @@ async def test_get_annotation_spec_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
@@ -2272,7 +2286,7 @@ def test_create_model(transport: str = "grpc", request_type=service.CreateModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -2293,19 +2307,19 @@ def test_create_model_from_dict():
@pytest.mark.asyncio
-async def test_create_model_async(transport: str = "grpc_asyncio"):
+async def test_create_model_async(
+ transport: str = "grpc_asyncio", request_type=service.CreateModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.CreateModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -2317,12 +2331,17 @@ async def test_create_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.CreateModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_create_model_async_from_dict():
+ await test_create_model_async(request_type=dict)
+
+
def test_create_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2332,7 +2351,7 @@ def test_create_model_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_model(request)
@@ -2357,9 +2376,7 @@ async def test_create_model_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -2380,7 +2397,7 @@ def test_create_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -2431,9 +2448,7 @@ async def test_create_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -2493,7 +2508,7 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model(
name="name_value",
@@ -2515,6 +2530,7 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, model.Model)
assert response.name == "name_value"
@@ -2533,19 +2549,19 @@ def test_get_model_from_dict():
@pytest.mark.asyncio
-async def test_get_model_async(transport: str = "grpc_asyncio"):
+async def test_get_model_async(
+ transport: str = "grpc_asyncio", request_type=service.GetModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model.Model(
@@ -2563,7 +2579,7 @@ async def test_get_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model.Model)
@@ -2579,6 +2595,11 @@ async def test_get_model_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_get_model_async_from_dict():
+ await test_get_model_async(request_type=dict)
+
+
def test_get_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2588,7 +2609,7 @@ def test_get_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = model.Model()
client.get_model(request)
@@ -2613,9 +2634,7 @@ async def test_get_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
await client.get_model(request)
@@ -2634,7 +2653,7 @@ def test_get_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model()
@@ -2666,9 +2685,7 @@ async def test_get_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model()
@@ -2707,7 +2724,7 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse(
next_page_token="next_page_token_value",
@@ -2722,6 +2739,7 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListModelsPager)
assert response.next_page_token == "next_page_token_value"
@@ -2732,19 +2750,19 @@ def test_list_models_from_dict():
@pytest.mark.asyncio
-async def test_list_models_async(transport: str = "grpc_asyncio"):
+async def test_list_models_async(
+ transport: str = "grpc_asyncio", request_type=service.ListModelsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListModelsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_models), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse(next_page_token="next_page_token_value",)
@@ -2756,7 +2774,7 @@ async def test_list_models_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelsAsyncPager)
@@ -2764,6 +2782,11 @@ async def test_list_models_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_models_async_from_dict():
+ await test_list_models_async(request_type=dict)
+
+
def test_list_models_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2773,7 +2796,7 @@ def test_list_models_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
call.return_value = service.ListModelsResponse()
client.list_models(request)
@@ -2798,9 +2821,7 @@ async def test_list_models_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_models), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse()
)
@@ -2821,7 +2842,7 @@ def test_list_models_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse()
@@ -2853,9 +2874,7 @@ async def test_list_models_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_models), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse()
@@ -2890,7 +2909,7 @@ def test_list_models_pager():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListModelsResponse(
@@ -2920,7 +2939,7 @@ def test_list_models_pages():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListModelsResponse(
@@ -2943,9 +2962,7 @@ async def test_list_models_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_models),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -2974,9 +2991,7 @@ async def test_list_models_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_models),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -3006,7 +3021,7 @@ def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -3027,19 +3042,19 @@ def test_delete_model_from_dict():
@pytest.mark.asyncio
-async def test_delete_model_async(transport: str = "grpc_asyncio"):
+async def test_delete_model_async(
+ transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.DeleteModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -3051,12 +3066,17 @@ async def test_delete_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.DeleteModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_delete_model_async_from_dict():
+ await test_delete_model_async(request_type=dict)
+
+
def test_delete_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3066,7 +3086,7 @@ def test_delete_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_model(request)
@@ -3091,9 +3111,7 @@ async def test_delete_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -3114,7 +3132,7 @@ def test_delete_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3146,9 +3164,7 @@ async def test_delete_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3189,7 +3205,7 @@ def test_update_model(transport: str = "grpc", request_type=service.UpdateModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_model.Model(
name="name_value",
@@ -3211,6 +3227,7 @@ def test_update_model(transport: str = "grpc", request_type=service.UpdateModelR
assert args[0] == service.UpdateModelRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, gca_model.Model)
assert response.name == "name_value"
@@ -3229,19 +3246,19 @@ def test_update_model_from_dict():
@pytest.mark.asyncio
-async def test_update_model_async(transport: str = "grpc_asyncio"):
+async def test_update_model_async(
+ transport: str = "grpc_asyncio", request_type=service.UpdateModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UpdateModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_model.Model(
@@ -3259,7 +3276,7 @@ async def test_update_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UpdateModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_model.Model)
@@ -3275,6 +3292,11 @@ async def test_update_model_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_update_model_async_from_dict():
+ await test_update_model_async(request_type=dict)
+
+
def test_update_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3284,7 +3306,7 @@ def test_update_model_field_headers():
request.model.name = "model.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_model), "__call__") as call:
call.return_value = gca_model.Model()
client.update_model(request)
@@ -3309,9 +3331,7 @@ async def test_update_model_field_headers_async():
request.model.name = "model.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_model.Model())
await client.update_model(request)
@@ -3330,7 +3350,7 @@ def test_update_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_model.Model()
@@ -3381,9 +3401,7 @@ async def test_update_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_model.Model()
@@ -3441,7 +3459,7 @@ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -3462,19 +3480,19 @@ def test_deploy_model_from_dict():
@pytest.mark.asyncio
-async def test_deploy_model_async(transport: str = "grpc_asyncio"):
+async def test_deploy_model_async(
+ transport: str = "grpc_asyncio", request_type=service.DeployModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.DeployModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.deploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -3486,12 +3504,17 @@ async def test_deploy_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.DeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_deploy_model_async_from_dict():
+ await test_deploy_model_async(request_type=dict)
+
+
def test_deploy_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3501,7 +3524,7 @@ def test_deploy_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.deploy_model(request)
@@ -3526,9 +3549,7 @@ async def test_deploy_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.deploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -3549,7 +3570,7 @@ def test_deploy_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3581,9 +3602,7 @@ async def test_deploy_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.deploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3626,7 +3645,7 @@ def test_undeploy_model(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -3647,19 +3666,19 @@ def test_undeploy_model_from_dict():
@pytest.mark.asyncio
-async def test_undeploy_model_async(transport: str = "grpc_asyncio"):
+async def test_undeploy_model_async(
+ transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UndeployModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.undeploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -3671,12 +3690,17 @@ async def test_undeploy_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UndeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_undeploy_model_async_from_dict():
+ await test_undeploy_model_async(request_type=dict)
+
+
def test_undeploy_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3686,7 +3710,7 @@ def test_undeploy_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.undeploy_model(request)
@@ -3711,9 +3735,7 @@ async def test_undeploy_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.undeploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -3734,7 +3756,7 @@ def test_undeploy_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3766,9 +3788,7 @@ async def test_undeploy_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.undeploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3809,7 +3829,7 @@ def test_export_model(transport: str = "grpc", request_type=service.ExportModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -3830,19 +3850,19 @@ def test_export_model_from_dict():
@pytest.mark.asyncio
-async def test_export_model_async(transport: str = "grpc_asyncio"):
+async def test_export_model_async(
+ transport: str = "grpc_asyncio", request_type=service.ExportModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ExportModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -3854,12 +3874,17 @@ async def test_export_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ExportModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_export_model_async_from_dict():
+ await test_export_model_async(request_type=dict)
+
+
def test_export_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3869,7 +3894,7 @@ def test_export_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_model(request)
@@ -3894,9 +3919,7 @@ async def test_export_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -3917,7 +3940,7 @@ def test_export_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -3968,9 +3991,7 @@ async def test_export_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4033,7 +4054,7 @@ def test_get_model_evaluation(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation(
@@ -4055,6 +4076,7 @@ def test_get_model_evaluation(
assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, model_evaluation.ModelEvaluation)
assert response.name == "name_value"
@@ -4071,18 +4093,20 @@ def test_get_model_evaluation_from_dict():
@pytest.mark.asyncio
-async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"):
+async def test_get_model_evaluation_async(
+ transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetModelEvaluationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -4100,7 +4124,7 @@ async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model_evaluation.ModelEvaluation)
@@ -4114,6 +4138,11 @@ async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"):
assert response.evaluated_example_count == 2446
+@pytest.mark.asyncio
+async def test_get_model_evaluation_async_from_dict():
+ await test_get_model_evaluation_async(request_type=dict)
+
+
def test_get_model_evaluation_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -4124,7 +4153,7 @@ def test_get_model_evaluation_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
call.return_value = model_evaluation.ModelEvaluation()
@@ -4151,7 +4180,7 @@ async def test_get_model_evaluation_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_evaluation.ModelEvaluation()
@@ -4174,7 +4203,7 @@ def test_get_model_evaluation_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation()
@@ -4208,7 +4237,7 @@ async def test_get_model_evaluation_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation()
@@ -4253,7 +4282,7 @@ def test_list_model_evaluations(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse(
@@ -4269,6 +4298,7 @@ def test_list_model_evaluations(
assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListModelEvaluationsPager)
assert response.next_page_token == "next_page_token_value"
@@ -4279,18 +4309,20 @@ def test_list_model_evaluations_from_dict():
@pytest.mark.asyncio
-async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"):
+async def test_list_model_evaluations_async(
+ transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListModelEvaluationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -4305,7 +4337,7 @@ async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
@@ -4313,6 +4345,11 @@ async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_from_dict():
+ await test_list_model_evaluations_async(request_type=dict)
+
+
def test_list_model_evaluations_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -4323,7 +4360,7 @@ def test_list_model_evaluations_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
call.return_value = service.ListModelEvaluationsResponse()
@@ -4350,7 +4387,7 @@ async def test_list_model_evaluations_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelEvaluationsResponse()
@@ -4373,7 +4410,7 @@ def test_list_model_evaluations_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse()
@@ -4413,7 +4450,7 @@ async def test_list_model_evaluations_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse()
@@ -4456,7 +4493,7 @@ def test_list_model_evaluations_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -4502,7 +4539,7 @@ def test_list_model_evaluations_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -4540,7 +4577,7 @@ async def test_list_model_evaluations_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations),
+ type(client.transport.list_model_evaluations),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -4585,7 +4622,7 @@ async def test_list_model_evaluations_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations),
+ type(client.transport.list_model_evaluations),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -4657,7 +4694,7 @@ def test_transport_instance():
credentials=credentials.AnonymousCredentials(),
)
client = AutoMlClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
@@ -4690,7 +4727,7 @@ def test_transport_adc(transport_class):
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
- assert isinstance(client._transport, transports.AutoMlGrpcTransport,)
+ assert isinstance(client.transport, transports.AutoMlGrpcTransport,)
def test_auto_ml_base_transport_error():
@@ -4806,7 +4843,7 @@ def test_auto_ml_host_no_port():
api_endpoint="automl.googleapis.com"
),
)
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_auto_ml_host_with_port():
@@ -4816,7 +4853,7 @@ def test_auto_ml_host_with_port():
api_endpoint="automl.googleapis.com:8000"
),
)
- assert client._transport._host == "automl.googleapis.com:8000"
+ assert client.transport._host == "automl.googleapis.com:8000"
def test_auto_ml_grpc_transport_channel():
@@ -4828,6 +4865,7 @@ def test_auto_ml_grpc_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
def test_auto_ml_grpc_asyncio_transport_channel():
@@ -4839,6 +4877,7 @@ def test_auto_ml_grpc_asyncio_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize(
@@ -4881,6 +4920,7 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
quota_project_id=None,
)
assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
@@ -4924,7 +4964,7 @@ def test_auto_ml_grpc_lro_client():
client = AutoMlClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
- transport = client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
@@ -4937,7 +4977,7 @@ def test_auto_ml_grpc_lro_async_client():
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
- transport = client._client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
@@ -4946,10 +4986,42 @@ def test_auto_ml_grpc_lro_async_client():
assert transport.operations_client is transport.operations_client
-def test_dataset_path():
+def test_annotation_spec_path():
project = "squid"
location = "clam"
dataset = "whelk"
+ annotation_spec = "octopus"
+
+ expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ annotation_spec=annotation_spec,
+ )
+ actual = AutoMlClient.annotation_spec_path(
+ project, location, dataset, annotation_spec
+ )
+ assert expected == actual
+
+
+def test_parse_annotation_spec_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ "dataset": "cuttlefish",
+ "annotation_spec": "mussel",
+ }
+ path = AutoMlClient.annotation_spec_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_annotation_spec_path(path)
+ assert expected == actual
+
+
+def test_dataset_path():
+ project = "winkle"
+ location = "nautilus"
+ dataset = "scallop"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
@@ -4960,9 +5032,9 @@ def test_dataset_path():
def test_parse_dataset_path():
expected = {
- "project": "octopus",
- "location": "oyster",
- "dataset": "nudibranch",
+ "project": "abalone",
+ "location": "squid",
+ "dataset": "clam",
}
path = AutoMlClient.dataset_path(**expected)
@@ -4972,9 +5044,9 @@ def test_parse_dataset_path():
def test_model_path():
- project = "squid"
- location = "clam"
- model = "whelk"
+ project = "whelk"
+ location = "octopus"
+ model = "oyster"
expected = "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
@@ -4985,9 +5057,9 @@ def test_model_path():
def test_parse_model_path():
expected = {
- "project": "octopus",
- "location": "oyster",
- "model": "nudibranch",
+ "project": "nudibranch",
+ "location": "cuttlefish",
+ "model": "mussel",
}
path = AutoMlClient.model_path(**expected)
@@ -4996,6 +5068,139 @@ def test_parse_model_path():
assert expected == actual
+def test_model_evaluation_path():
+ project = "winkle"
+ location = "nautilus"
+ model = "scallop"
+ model_evaluation = "abalone"
+
+ expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
+ project=project,
+ location=location,
+ model=model,
+ model_evaluation=model_evaluation,
+ )
+ actual = AutoMlClient.model_evaluation_path(
+ project, location, model, model_evaluation
+ )
+ assert expected == actual
+
+
+def test_parse_model_evaluation_path():
+ expected = {
+ "project": "squid",
+ "location": "clam",
+ "model": "whelk",
+ "model_evaluation": "octopus",
+ }
+ path = AutoMlClient.model_evaluation_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_model_evaluation_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "oyster"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = AutoMlClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "nudibranch",
+ }
+ path = AutoMlClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "cuttlefish"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = AutoMlClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "mussel",
+ }
+ path = AutoMlClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "winkle"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = AutoMlClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "nautilus",
+ }
+ path = AutoMlClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "scallop"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = AutoMlClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "abalone",
+ }
+ path = AutoMlClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "squid"
+ location = "clam"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = AutoMlClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "whelk",
+ "location": "octopus",
+ }
+ path = AutoMlClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_location_path(path)
+ assert expected == actual
+
+
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
diff --git a/tests/unit/gapic/automl_v1/test_prediction_service.py b/tests/unit/gapic/automl_v1/test_prediction_service.py
index fd886203..56857180 100644
--- a/tests/unit/gapic/automl_v1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1/test_prediction_service.py
@@ -106,12 +106,12 @@ def test_prediction_service_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_prediction_service_client_get_transport_class():
@@ -471,7 +471,7 @@ def test_predict(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
@@ -484,6 +484,7 @@ def test_predict(
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, prediction_service.PredictResponse)
@@ -492,17 +493,19 @@ def test_predict_from_dict():
@pytest.mark.asyncio
-async def test_predict_async(transport: str = "grpc_asyncio"):
+async def test_predict_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
+):
client = PredictionServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = prediction_service.PredictRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
@@ -514,12 +517,17 @@ async def test_predict_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, prediction_service.PredictResponse)
+@pytest.mark.asyncio
+async def test_predict_async_from_dict():
+ await test_predict_async(request_type=dict)
+
+
def test_predict_field_headers():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
@@ -529,7 +537,7 @@ def test_predict_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = prediction_service.PredictResponse()
client.predict(request)
@@ -556,7 +564,7 @@ async def test_predict_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
@@ -577,7 +585,7 @@ def test_predict_flattened():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
@@ -628,7 +636,7 @@ async def test_predict_flattened_async():
)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
@@ -690,7 +698,7 @@ def test_batch_predict(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.batch_predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -711,19 +719,19 @@ def test_batch_predict_from_dict():
@pytest.mark.asyncio
-async def test_batch_predict_async(transport: str = "grpc_asyncio"):
+async def test_batch_predict_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.BatchPredictRequest
+):
client = PredictionServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = prediction_service.BatchPredictRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.batch_predict), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -735,12 +743,17 @@ async def test_batch_predict_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == prediction_service.BatchPredictRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_batch_predict_async_from_dict():
+ await test_batch_predict_async(request_type=dict)
+
+
def test_batch_predict_field_headers():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
@@ -750,7 +763,7 @@ def test_batch_predict_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.batch_predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_predict(request)
@@ -777,9 +790,7 @@ async def test_batch_predict_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.batch_predict), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -800,7 +811,7 @@ def test_batch_predict_flattened():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.batch_predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -867,9 +878,7 @@ async def test_batch_predict_flattened_async():
)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.batch_predict), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -971,7 +980,7 @@ def test_transport_instance():
credentials=credentials.AnonymousCredentials(),
)
client = PredictionServiceClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
@@ -1007,7 +1016,7 @@ def test_transport_adc(transport_class):
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
- assert isinstance(client._transport, transports.PredictionServiceGrpcTransport,)
+ assert isinstance(client.transport, transports.PredictionServiceGrpcTransport,)
def test_prediction_service_base_transport_error():
@@ -1107,7 +1116,7 @@ def test_prediction_service_host_no_port():
api_endpoint="automl.googleapis.com"
),
)
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_prediction_service_host_with_port():
@@ -1117,7 +1126,7 @@ def test_prediction_service_host_with_port():
api_endpoint="automl.googleapis.com:8000"
),
)
- assert client._transport._host == "automl.googleapis.com:8000"
+ assert client.transport._host == "automl.googleapis.com:8000"
def test_prediction_service_grpc_transport_channel():
@@ -1129,6 +1138,7 @@ def test_prediction_service_grpc_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
def test_prediction_service_grpc_asyncio_transport_channel():
@@ -1140,6 +1150,7 @@ def test_prediction_service_grpc_asyncio_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
@pytest.mark.parametrize(
@@ -1187,6 +1198,7 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
quota_project_id=None,
)
assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
@@ -1233,7 +1245,7 @@ def test_prediction_service_grpc_lro_client():
client = PredictionServiceClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
- transport = client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
@@ -1246,7 +1258,7 @@ def test_prediction_service_grpc_lro_async_client():
client = PredictionServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
- transport = client._client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
@@ -1255,6 +1267,132 @@ def test_prediction_service_grpc_lro_async_client():
assert transport.operations_client is transport.operations_client
+def test_model_path():
+ project = "squid"
+ location = "clam"
+ model = "whelk"
+
+ expected = "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+ actual = PredictionServiceClient.model_path(project, location, model)
+ assert expected == actual
+
+
+def test_parse_model_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "model": "nudibranch",
+ }
+ path = PredictionServiceClient.model_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_model_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = PredictionServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = PredictionServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = PredictionServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = PredictionServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = PredictionServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = PredictionServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = PredictionServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = PredictionServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = PredictionServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = PredictionServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
diff --git a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
index 09cb0749..3486adde 100644
--- a/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
+++ b/tests/unit/gapic/automl_v1beta1/test_auto_ml.py
@@ -44,7 +44,6 @@
from google.cloud.automl_v1beta1.types import column_spec
from google.cloud.automl_v1beta1.types import column_spec as gca_column_spec
from google.cloud.automl_v1beta1.types import data_stats
-from google.cloud.automl_v1beta1.types import data_stats as gca_data_stats
from google.cloud.automl_v1beta1.types import data_types
from google.cloud.automl_v1beta1.types import dataset
from google.cloud.automl_v1beta1.types import dataset as gca_dataset
@@ -117,12 +116,12 @@ def test_auto_ml_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_auto_ml_client_get_transport_class():
@@ -448,7 +447,7 @@ def test_create_dataset(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset(
name="name_value",
@@ -470,6 +469,7 @@ def test_create_dataset(
assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
@@ -488,19 +488,19 @@ def test_create_dataset_from_dict():
@pytest.mark.asyncio
-async def test_create_dataset_async(transport: str = "grpc_asyncio"):
+async def test_create_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.CreateDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.CreateDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_dataset.Dataset(
@@ -518,7 +518,7 @@ async def test_create_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.CreateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
@@ -534,6 +534,11 @@ async def test_create_dataset_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_create_dataset_async_from_dict():
+ await test_create_dataset_async(request_type=dict)
+
+
def test_create_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -543,7 +548,7 @@ def test_create_dataset_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
client.create_dataset(request)
@@ -568,9 +573,7 @@ async def test_create_dataset_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
await client.create_dataset(request)
@@ -589,7 +592,7 @@ def test_create_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
@@ -640,9 +643,7 @@ async def test_create_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
@@ -700,7 +701,7 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset(
name="name_value",
@@ -722,6 +723,7 @@ def test_get_dataset(transport: str = "grpc", request_type=service.GetDatasetReq
assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, dataset.Dataset)
assert response.name == "name_value"
@@ -740,19 +742,19 @@ def test_get_dataset_from_dict():
@pytest.mark.asyncio
-async def test_get_dataset_async(transport: str = "grpc_asyncio"):
+async def test_get_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.GetDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
dataset.Dataset(
@@ -770,7 +772,7 @@ async def test_get_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, dataset.Dataset)
@@ -786,6 +788,11 @@ async def test_get_dataset_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_get_dataset_async_from_dict():
+ await test_get_dataset_async(request_type=dict)
+
+
def test_get_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -795,7 +802,7 @@ def test_get_dataset_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = dataset.Dataset()
client.get_dataset(request)
@@ -820,9 +827,7 @@ async def test_get_dataset_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(dataset.Dataset())
await client.get_dataset(request)
@@ -841,7 +846,7 @@ def test_get_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
@@ -873,9 +878,7 @@ async def test_get_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = dataset.Dataset()
@@ -916,7 +919,7 @@ def test_list_datasets(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse(
next_page_token="next_page_token_value",
@@ -931,6 +934,7 @@ def test_list_datasets(
assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListDatasetsPager)
assert response.next_page_token == "next_page_token_value"
@@ -941,19 +945,19 @@ def test_list_datasets_from_dict():
@pytest.mark.asyncio
-async def test_list_datasets_async(transport: str = "grpc_asyncio"):
+async def test_list_datasets_async(
+ transport: str = "grpc_asyncio", request_type=service.ListDatasetsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListDatasetsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_datasets), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse(next_page_token="next_page_token_value",)
@@ -965,7 +969,7 @@ async def test_list_datasets_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListDatasetsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListDatasetsAsyncPager)
@@ -973,6 +977,11 @@ async def test_list_datasets_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_datasets_async_from_dict():
+ await test_list_datasets_async(request_type=dict)
+
+
def test_list_datasets_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -982,7 +991,7 @@ def test_list_datasets_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = service.ListDatasetsResponse()
client.list_datasets(request)
@@ -1007,9 +1016,7 @@ async def test_list_datasets_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_datasets), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListDatasetsResponse()
)
@@ -1030,7 +1037,7 @@ def test_list_datasets_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse()
@@ -1062,9 +1069,7 @@ async def test_list_datasets_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_datasets), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListDatasetsResponse()
@@ -1099,7 +1104,7 @@ def test_list_datasets_pager():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListDatasetsResponse(
@@ -1133,7 +1138,7 @@ def test_list_datasets_pages():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_datasets), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_datasets), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListDatasetsResponse(
@@ -1160,9 +1165,7 @@ async def test_list_datasets_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_datasets),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1195,9 +1198,7 @@ async def test_list_datasets_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_datasets),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_datasets), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1233,7 +1234,7 @@ def test_update_dataset(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset(
name="name_value",
@@ -1255,6 +1256,7 @@ def test_update_dataset(
assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, gca_dataset.Dataset)
assert response.name == "name_value"
@@ -1273,19 +1275,19 @@ def test_update_dataset_from_dict():
@pytest.mark.asyncio
-async def test_update_dataset_async(transport: str = "grpc_asyncio"):
+async def test_update_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.UpdateDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UpdateDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_dataset.Dataset(
@@ -1303,7 +1305,7 @@ async def test_update_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UpdateDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_dataset.Dataset)
@@ -1319,6 +1321,11 @@ async def test_update_dataset_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_update_dataset_async_from_dict():
+ await test_update_dataset_async(request_type=dict)
+
+
def test_update_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1328,7 +1335,7 @@ def test_update_dataset_field_headers():
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = gca_dataset.Dataset()
client.update_dataset(request)
@@ -1355,9 +1362,7 @@ async def test_update_dataset_field_headers_async():
request.dataset.name = "dataset.name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gca_dataset.Dataset())
await client.update_dataset(request)
@@ -1378,7 +1383,7 @@ def test_update_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
@@ -1425,9 +1430,7 @@ async def test_update_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.update_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.update_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = gca_dataset.Dataset()
@@ -1483,7 +1486,7 @@ def test_delete_dataset(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -1504,19 +1507,19 @@ def test_delete_dataset_from_dict():
@pytest.mark.asyncio
-async def test_delete_dataset_async(transport: str = "grpc_asyncio"):
+async def test_delete_dataset_async(
+ transport: str = "grpc_asyncio", request_type=service.DeleteDatasetRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.DeleteDatasetRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -1528,12 +1531,17 @@ async def test_delete_dataset_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.DeleteDatasetRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_delete_dataset_async_from_dict():
+ await test_delete_dataset_async(request_type=dict)
+
+
def test_delete_dataset_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1543,7 +1551,7 @@ def test_delete_dataset_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_dataset(request)
@@ -1568,9 +1576,7 @@ async def test_delete_dataset_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -1591,7 +1597,7 @@ def test_delete_dataset_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_dataset), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1623,9 +1629,7 @@ async def test_delete_dataset_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_dataset), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_dataset), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1666,7 +1670,7 @@ def test_import_data(transport: str = "grpc", request_type=service.ImportDataReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -1687,19 +1691,19 @@ def test_import_data_from_dict():
@pytest.mark.asyncio
-async def test_import_data_async(transport: str = "grpc_asyncio"):
+async def test_import_data_async(
+ transport: str = "grpc_asyncio", request_type=service.ImportDataRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ImportDataRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.import_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -1711,12 +1715,17 @@ async def test_import_data_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ImportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_import_data_async_from_dict():
+ await test_import_data_async(request_type=dict)
+
+
def test_import_data_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1726,7 +1735,7 @@ def test_import_data_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.import_data(request)
@@ -1751,9 +1760,7 @@ async def test_import_data_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.import_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -1774,7 +1781,7 @@ def test_import_data_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.import_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1819,9 +1826,7 @@ async def test_import_data_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.import_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.import_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -1875,7 +1880,7 @@ def test_export_data(transport: str = "grpc", request_type=service.ExportDataReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -1896,19 +1901,19 @@ def test_export_data_from_dict():
@pytest.mark.asyncio
-async def test_export_data_async(transport: str = "grpc_asyncio"):
+async def test_export_data_async(
+ transport: str = "grpc_asyncio", request_type=service.ExportDataRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ExportDataRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -1920,12 +1925,17 @@ async def test_export_data_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ExportDataRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_export_data_async_from_dict():
+ await test_export_data_async(request_type=dict)
+
+
def test_export_data_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -1935,7 +1945,7 @@ def test_export_data_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_data(request)
@@ -1960,9 +1970,7 @@ async def test_export_data_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -1983,7 +1991,7 @@ def test_export_data_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_data), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -2034,9 +2042,7 @@ async def test_export_data_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_data), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_data), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -2099,7 +2105,7 @@ def test_get_annotation_spec(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec(
@@ -2115,6 +2121,7 @@ def test_get_annotation_spec(
assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, annotation_spec.AnnotationSpec)
assert response.name == "name_value"
@@ -2129,18 +2136,20 @@ def test_get_annotation_spec_from_dict():
@pytest.mark.asyncio
-async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"):
+async def test_get_annotation_spec_async(
+ transport: str = "grpc_asyncio", request_type=service.GetAnnotationSpecRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetAnnotationSpecRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2157,7 +2166,7 @@ async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetAnnotationSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, annotation_spec.AnnotationSpec)
@@ -2169,6 +2178,11 @@ async def test_get_annotation_spec_async(transport: str = "grpc_asyncio"):
assert response.example_count == 1396
+@pytest.mark.asyncio
+async def test_get_annotation_spec_async_from_dict():
+ await test_get_annotation_spec_async(request_type=dict)
+
+
def test_get_annotation_spec_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2179,7 +2193,7 @@ def test_get_annotation_spec_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = annotation_spec.AnnotationSpec()
@@ -2206,7 +2220,7 @@ async def test_get_annotation_spec_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
annotation_spec.AnnotationSpec()
@@ -2229,7 +2243,7 @@ def test_get_annotation_spec_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
@@ -2263,7 +2277,7 @@ async def test_get_annotation_spec_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_annotation_spec), "__call__"
+ type(client.transport.get_annotation_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = annotation_spec.AnnotationSpec()
@@ -2307,7 +2321,7 @@ def test_get_table_spec(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_table_spec), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = table_spec.TableSpec(
name="name_value",
@@ -2327,6 +2341,7 @@ def test_get_table_spec(
assert args[0] == service.GetTableSpecRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, table_spec.TableSpec)
assert response.name == "name_value"
@@ -2347,19 +2362,19 @@ def test_get_table_spec_from_dict():
@pytest.mark.asyncio
-async def test_get_table_spec_async(transport: str = "grpc_asyncio"):
+async def test_get_table_spec_async(
+ transport: str = "grpc_asyncio", request_type=service.GetTableSpecRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetTableSpecRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_table_spec), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
table_spec.TableSpec(
@@ -2378,7 +2393,7 @@ async def test_get_table_spec_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetTableSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, table_spec.TableSpec)
@@ -2396,6 +2411,11 @@ async def test_get_table_spec_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_get_table_spec_async_from_dict():
+ await test_get_table_spec_async(request_type=dict)
+
+
def test_get_table_spec_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2405,7 +2425,7 @@ def test_get_table_spec_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_table_spec), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
call.return_value = table_spec.TableSpec()
client.get_table_spec(request)
@@ -2430,9 +2450,7 @@ async def test_get_table_spec_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_table_spec), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
table_spec.TableSpec()
)
@@ -2453,7 +2471,7 @@ def test_get_table_spec_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_table_spec), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = table_spec.TableSpec()
@@ -2485,9 +2503,7 @@ async def test_get_table_spec_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_table_spec), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_table_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = table_spec.TableSpec()
@@ -2530,9 +2546,7 @@ def test_list_table_specs(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListTableSpecsResponse(
next_page_token="next_page_token_value",
@@ -2547,6 +2561,7 @@ def test_list_table_specs(
assert args[0] == service.ListTableSpecsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListTableSpecsPager)
assert response.next_page_token == "next_page_token_value"
@@ -2557,19 +2572,19 @@ def test_list_table_specs_from_dict():
@pytest.mark.asyncio
-async def test_list_table_specs_async(transport: str = "grpc_asyncio"):
+async def test_list_table_specs_async(
+ transport: str = "grpc_asyncio", request_type=service.ListTableSpecsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListTableSpecsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListTableSpecsResponse(next_page_token="next_page_token_value",)
@@ -2581,7 +2596,7 @@ async def test_list_table_specs_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListTableSpecsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListTableSpecsAsyncPager)
@@ -2589,6 +2604,11 @@ async def test_list_table_specs_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_table_specs_async_from_dict():
+ await test_list_table_specs_async(request_type=dict)
+
+
def test_list_table_specs_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2598,9 +2618,7 @@ def test_list_table_specs_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
call.return_value = service.ListTableSpecsResponse()
client.list_table_specs(request)
@@ -2625,9 +2643,7 @@ async def test_list_table_specs_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListTableSpecsResponse()
)
@@ -2648,9 +2664,7 @@ def test_list_table_specs_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListTableSpecsResponse()
@@ -2682,9 +2696,7 @@ async def test_list_table_specs_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListTableSpecsResponse()
@@ -2719,9 +2731,7 @@ def test_list_table_specs_pager():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListTableSpecsResponse(
@@ -2759,9 +2769,7 @@ def test_list_table_specs_pages():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.list_table_specs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_table_specs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListTableSpecsResponse(
@@ -2792,9 +2800,7 @@ async def test_list_table_specs_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_table_specs),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_table_specs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -2831,9 +2837,7 @@ async def test_list_table_specs_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_table_specs),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_table_specs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -2874,7 +2878,7 @@ def test_update_table_spec(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.update_table_spec), "__call__"
+ type(client.transport.update_table_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_table_spec.TableSpec(
@@ -2895,6 +2899,7 @@ def test_update_table_spec(
assert args[0] == service.UpdateTableSpecRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, gca_table_spec.TableSpec)
assert response.name == "name_value"
@@ -2915,18 +2920,20 @@ def test_update_table_spec_from_dict():
@pytest.mark.asyncio
-async def test_update_table_spec_async(transport: str = "grpc_asyncio"):
+async def test_update_table_spec_async(
+ transport: str = "grpc_asyncio", request_type=service.UpdateTableSpecRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UpdateTableSpecRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.update_table_spec), "__call__"
+ type(client.transport.update_table_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -2946,7 +2953,7 @@ async def test_update_table_spec_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UpdateTableSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_table_spec.TableSpec)
@@ -2964,6 +2971,11 @@ async def test_update_table_spec_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_update_table_spec_async_from_dict():
+ await test_update_table_spec_async(request_type=dict)
+
+
def test_update_table_spec_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -2974,7 +2986,7 @@ def test_update_table_spec_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.update_table_spec), "__call__"
+ type(client.transport.update_table_spec), "__call__"
) as call:
call.return_value = gca_table_spec.TableSpec()
@@ -3003,7 +3015,7 @@ async def test_update_table_spec_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.update_table_spec), "__call__"
+ type(client.transport.update_table_spec), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_table_spec.TableSpec()
@@ -3028,7 +3040,7 @@ def test_update_table_spec_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.update_table_spec), "__call__"
+ type(client.transport.update_table_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_table_spec.TableSpec()
@@ -3065,7 +3077,7 @@ async def test_update_table_spec_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.update_table_spec), "__call__"
+ type(client.transport.update_table_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_table_spec.TableSpec()
@@ -3112,7 +3124,7 @@ def test_get_column_spec(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_column_spec), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = column_spec.ColumnSpec(
name="name_value", display_name="display_name_value", etag="etag_value",
@@ -3127,6 +3139,7 @@ def test_get_column_spec(
assert args[0] == service.GetColumnSpecRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, column_spec.ColumnSpec)
assert response.name == "name_value"
@@ -3141,19 +3154,19 @@ def test_get_column_spec_from_dict():
@pytest.mark.asyncio
-async def test_get_column_spec_async(transport: str = "grpc_asyncio"):
+async def test_get_column_spec_async(
+ transport: str = "grpc_asyncio", request_type=service.GetColumnSpecRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetColumnSpecRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_column_spec), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
column_spec.ColumnSpec(
@@ -3167,7 +3180,7 @@ async def test_get_column_spec_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetColumnSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, column_spec.ColumnSpec)
@@ -3179,6 +3192,11 @@ async def test_get_column_spec_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_get_column_spec_async_from_dict():
+ await test_get_column_spec_async(request_type=dict)
+
+
def test_get_column_spec_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3188,7 +3206,7 @@ def test_get_column_spec_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_column_spec), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
call.return_value = column_spec.ColumnSpec()
client.get_column_spec(request)
@@ -3213,9 +3231,7 @@ async def test_get_column_spec_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_column_spec), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
column_spec.ColumnSpec()
)
@@ -3236,7 +3252,7 @@ def test_get_column_spec_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_column_spec), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = column_spec.ColumnSpec()
@@ -3268,9 +3284,7 @@ async def test_get_column_spec_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_column_spec), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_column_spec), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = column_spec.ColumnSpec()
@@ -3314,7 +3328,7 @@ def test_list_column_specs(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListColumnSpecsResponse(
@@ -3330,6 +3344,7 @@ def test_list_column_specs(
assert args[0] == service.ListColumnSpecsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListColumnSpecsPager)
assert response.next_page_token == "next_page_token_value"
@@ -3340,18 +3355,20 @@ def test_list_column_specs_from_dict():
@pytest.mark.asyncio
-async def test_list_column_specs_async(transport: str = "grpc_asyncio"):
+async def test_list_column_specs_async(
+ transport: str = "grpc_asyncio", request_type=service.ListColumnSpecsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListColumnSpecsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -3364,7 +3381,7 @@ async def test_list_column_specs_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListColumnSpecsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListColumnSpecsAsyncPager)
@@ -3372,6 +3389,11 @@ async def test_list_column_specs_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_column_specs_async_from_dict():
+ await test_list_column_specs_async(request_type=dict)
+
+
def test_list_column_specs_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3382,7 +3404,7 @@ def test_list_column_specs_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
call.return_value = service.ListColumnSpecsResponse()
@@ -3409,7 +3431,7 @@ async def test_list_column_specs_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListColumnSpecsResponse()
@@ -3432,7 +3454,7 @@ def test_list_column_specs_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListColumnSpecsResponse()
@@ -3466,7 +3488,7 @@ async def test_list_column_specs_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListColumnSpecsResponse()
@@ -3503,7 +3525,7 @@ def test_list_column_specs_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -3543,7 +3565,7 @@ def test_list_column_specs_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_column_specs), "__call__"
+ type(client.transport.list_column_specs), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -3575,7 +3597,7 @@ async def test_list_column_specs_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_column_specs),
+ type(client.transport.list_column_specs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -3614,7 +3636,7 @@ async def test_list_column_specs_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_column_specs),
+ type(client.transport.list_column_specs),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -3657,7 +3679,7 @@ def test_update_column_spec(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.update_column_spec), "__call__"
+ type(client.transport.update_column_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_column_spec.ColumnSpec(
@@ -3673,6 +3695,7 @@ def test_update_column_spec(
assert args[0] == service.UpdateColumnSpecRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, gca_column_spec.ColumnSpec)
assert response.name == "name_value"
@@ -3687,18 +3710,20 @@ def test_update_column_spec_from_dict():
@pytest.mark.asyncio
-async def test_update_column_spec_async(transport: str = "grpc_asyncio"):
+async def test_update_column_spec_async(
+ transport: str = "grpc_asyncio", request_type=service.UpdateColumnSpecRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UpdateColumnSpecRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.update_column_spec), "__call__"
+ type(client.transport.update_column_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -3713,7 +3738,7 @@ async def test_update_column_spec_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UpdateColumnSpecRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, gca_column_spec.ColumnSpec)
@@ -3725,6 +3750,11 @@ async def test_update_column_spec_async(transport: str = "grpc_asyncio"):
assert response.etag == "etag_value"
+@pytest.mark.asyncio
+async def test_update_column_spec_async_from_dict():
+ await test_update_column_spec_async(request_type=dict)
+
+
def test_update_column_spec_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3735,7 +3765,7 @@ def test_update_column_spec_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.update_column_spec), "__call__"
+ type(client.transport.update_column_spec), "__call__"
) as call:
call.return_value = gca_column_spec.ColumnSpec()
@@ -3764,7 +3794,7 @@ async def test_update_column_spec_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.update_column_spec), "__call__"
+ type(client.transport.update_column_spec), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
gca_column_spec.ColumnSpec()
@@ -3789,7 +3819,7 @@ def test_update_column_spec_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.update_column_spec), "__call__"
+ type(client.transport.update_column_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_column_spec.ColumnSpec()
@@ -3826,7 +3856,7 @@ async def test_update_column_spec_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.update_column_spec), "__call__"
+ type(client.transport.update_column_spec), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = gca_column_spec.ColumnSpec()
@@ -3871,7 +3901,7 @@ def test_create_model(transport: str = "grpc", request_type=service.CreateModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -3892,19 +3922,19 @@ def test_create_model_from_dict():
@pytest.mark.asyncio
-async def test_create_model_async(transport: str = "grpc_asyncio"):
+async def test_create_model_async(
+ transport: str = "grpc_asyncio", request_type=service.CreateModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.CreateModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -3916,12 +3946,17 @@ async def test_create_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.CreateModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_create_model_async_from_dict():
+ await test_create_model_async(request_type=dict)
+
+
def test_create_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -3931,7 +3966,7 @@ def test_create_model_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.create_model(request)
@@ -3956,9 +3991,7 @@ async def test_create_model_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -3979,7 +4012,7 @@ def test_create_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4030,9 +4063,7 @@ async def test_create_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.create_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.create_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4092,7 +4123,7 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model(
name="name_value",
@@ -4113,6 +4144,7 @@ def test_get_model(transport: str = "grpc", request_type=service.GetModelRequest
assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, model.Model)
assert response.name == "name_value"
@@ -4129,19 +4161,19 @@ def test_get_model_from_dict():
@pytest.mark.asyncio
-async def test_get_model_async(transport: str = "grpc_asyncio"):
+async def test_get_model_async(
+ transport: str = "grpc_asyncio", request_type=service.GetModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model.Model(
@@ -4158,7 +4190,7 @@ async def test_get_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model.Model)
@@ -4172,6 +4204,11 @@ async def test_get_model_async(transport: str = "grpc_asyncio"):
assert response.deployment_state == model.Model.DeploymentState.DEPLOYED
+@pytest.mark.asyncio
+async def test_get_model_async_from_dict():
+ await test_get_model_async(request_type=dict)
+
+
def test_get_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -4181,7 +4218,7 @@ def test_get_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = model.Model()
client.get_model(request)
@@ -4206,9 +4243,7 @@ async def test_get_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(model.Model())
await client.get_model(request)
@@ -4227,7 +4262,7 @@ def test_get_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model()
@@ -4259,9 +4294,7 @@ async def test_get_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = model.Model()
@@ -4300,7 +4333,7 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse(
next_page_token="next_page_token_value",
@@ -4315,6 +4348,7 @@ def test_list_models(transport: str = "grpc", request_type=service.ListModelsReq
assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListModelsPager)
assert response.next_page_token == "next_page_token_value"
@@ -4325,19 +4359,19 @@ def test_list_models_from_dict():
@pytest.mark.asyncio
-async def test_list_models_async(transport: str = "grpc_asyncio"):
+async def test_list_models_async(
+ transport: str = "grpc_asyncio", request_type=service.ListModelsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListModelsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_models), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse(next_page_token="next_page_token_value",)
@@ -4349,7 +4383,7 @@ async def test_list_models_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListModelsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelsAsyncPager)
@@ -4357,6 +4391,11 @@ async def test_list_models_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_models_async_from_dict():
+ await test_list_models_async(request_type=dict)
+
+
def test_list_models_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -4366,7 +4405,7 @@ def test_list_models_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
call.return_value = service.ListModelsResponse()
client.list_models(request)
@@ -4391,9 +4430,7 @@ async def test_list_models_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_models), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelsResponse()
)
@@ -4414,7 +4451,7 @@ def test_list_models_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse()
@@ -4446,9 +4483,7 @@ async def test_list_models_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_models), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelsResponse()
@@ -4483,7 +4518,7 @@ def test_list_models_pager():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListModelsResponse(
@@ -4513,7 +4548,7 @@ def test_list_models_pages():
client = AutoMlClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_models), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_models), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
service.ListModelsResponse(
@@ -4536,9 +4571,7 @@ async def test_list_models_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_models),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -4567,9 +4600,7 @@ async def test_list_models_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_models),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_models), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -4599,7 +4630,7 @@ def test_delete_model(transport: str = "grpc", request_type=service.DeleteModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -4620,19 +4651,19 @@ def test_delete_model_from_dict():
@pytest.mark.asyncio
-async def test_delete_model_async(transport: str = "grpc_asyncio"):
+async def test_delete_model_async(
+ transport: str = "grpc_asyncio", request_type=service.DeleteModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.DeleteModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -4644,12 +4675,17 @@ async def test_delete_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.DeleteModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_delete_model_async_from_dict():
+ await test_delete_model_async(request_type=dict)
+
+
def test_delete_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -4659,7 +4695,7 @@ def test_delete_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.delete_model(request)
@@ -4684,9 +4720,7 @@ async def test_delete_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -4707,7 +4741,7 @@ def test_delete_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4739,9 +4773,7 @@ async def test_delete_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.delete_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.delete_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4782,7 +4814,7 @@ def test_deploy_model(transport: str = "grpc", request_type=service.DeployModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -4803,19 +4835,19 @@ def test_deploy_model_from_dict():
@pytest.mark.asyncio
-async def test_deploy_model_async(transport: str = "grpc_asyncio"):
+async def test_deploy_model_async(
+ transport: str = "grpc_asyncio", request_type=service.DeployModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.DeployModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.deploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -4827,12 +4859,17 @@ async def test_deploy_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.DeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_deploy_model_async_from_dict():
+ await test_deploy_model_async(request_type=dict)
+
+
def test_deploy_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -4842,7 +4879,7 @@ def test_deploy_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.deploy_model(request)
@@ -4867,9 +4904,7 @@ async def test_deploy_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.deploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -4890,7 +4925,7 @@ def test_deploy_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.deploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4922,9 +4957,7 @@ async def test_deploy_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.deploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.deploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -4967,7 +5000,7 @@ def test_undeploy_model(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -4988,19 +5021,19 @@ def test_undeploy_model_from_dict():
@pytest.mark.asyncio
-async def test_undeploy_model_async(transport: str = "grpc_asyncio"):
+async def test_undeploy_model_async(
+ transport: str = "grpc_asyncio", request_type=service.UndeployModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.UndeployModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.undeploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -5012,12 +5045,17 @@ async def test_undeploy_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.UndeployModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_undeploy_model_async_from_dict():
+ await test_undeploy_model_async(request_type=dict)
+
+
def test_undeploy_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -5027,7 +5065,7 @@ def test_undeploy_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.undeploy_model(request)
@@ -5052,9 +5090,7 @@ async def test_undeploy_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.undeploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -5075,7 +5111,7 @@ def test_undeploy_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.undeploy_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5107,9 +5143,7 @@ async def test_undeploy_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.undeploy_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.undeploy_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5150,7 +5184,7 @@ def test_export_model(transport: str = "grpc", request_type=service.ExportModelR
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -5171,19 +5205,19 @@ def test_export_model_from_dict():
@pytest.mark.asyncio
-async def test_export_model_async(transport: str = "grpc_asyncio"):
+async def test_export_model_async(
+ transport: str = "grpc_asyncio", request_type=service.ExportModelRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ExportModelRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -5195,12 +5229,17 @@ async def test_export_model_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ExportModelRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_export_model_async_from_dict():
+ await test_export_model_async(request_type=dict)
+
+
def test_export_model_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -5210,7 +5249,7 @@ def test_export_model_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.export_model(request)
@@ -5235,9 +5274,7 @@ async def test_export_model_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -5258,7 +5295,7 @@ def test_export_model_flattened():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.export_model), "__call__") as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5309,9 +5346,7 @@ async def test_export_model_flattened_async():
client = AutoMlAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.export_model), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.export_model), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5374,7 +5409,7 @@ def test_export_evaluated_examples(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.export_evaluated_examples), "__call__"
+ type(client.transport.export_evaluated_examples), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -5396,18 +5431,20 @@ def test_export_evaluated_examples_from_dict():
@pytest.mark.asyncio
-async def test_export_evaluated_examples_async(transport: str = "grpc_asyncio"):
+async def test_export_evaluated_examples_async(
+ transport: str = "grpc_asyncio", request_type=service.ExportEvaluatedExamplesRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ExportEvaluatedExamplesRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.export_evaluated_examples), "__call__"
+ type(client.transport.export_evaluated_examples), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -5420,12 +5457,17 @@ async def test_export_evaluated_examples_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ExportEvaluatedExamplesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_export_evaluated_examples_async_from_dict():
+ await test_export_evaluated_examples_async(request_type=dict)
+
+
def test_export_evaluated_examples_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -5436,7 +5478,7 @@ def test_export_evaluated_examples_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.export_evaluated_examples), "__call__"
+ type(client.transport.export_evaluated_examples), "__call__"
) as call:
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5463,7 +5505,7 @@ async def test_export_evaluated_examples_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.export_evaluated_examples), "__call__"
+ type(client.transport.export_evaluated_examples), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
@@ -5486,7 +5528,7 @@ def test_export_evaluated_examples_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.export_evaluated_examples), "__call__"
+ type(client.transport.export_evaluated_examples), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5537,7 +5579,7 @@ async def test_export_evaluated_examples_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.export_evaluated_examples), "__call__"
+ type(client.transport.export_evaluated_examples), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -5599,7 +5641,7 @@ def test_get_model_evaluation(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation(
@@ -5621,6 +5663,7 @@ def test_get_model_evaluation(
assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, model_evaluation.ModelEvaluation)
assert response.name == "name_value"
@@ -5637,18 +5680,20 @@ def test_get_model_evaluation_from_dict():
@pytest.mark.asyncio
-async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"):
+async def test_get_model_evaluation_async(
+ transport: str = "grpc_asyncio", request_type=service.GetModelEvaluationRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.GetModelEvaluationRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -5666,7 +5711,7 @@ async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.GetModelEvaluationRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, model_evaluation.ModelEvaluation)
@@ -5680,6 +5725,11 @@ async def test_get_model_evaluation_async(transport: str = "grpc_asyncio"):
assert response.evaluated_example_count == 2446
+@pytest.mark.asyncio
+async def test_get_model_evaluation_async_from_dict():
+ await test_get_model_evaluation_async(request_type=dict)
+
+
def test_get_model_evaluation_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -5690,7 +5740,7 @@ def test_get_model_evaluation_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
call.return_value = model_evaluation.ModelEvaluation()
@@ -5717,7 +5767,7 @@ async def test_get_model_evaluation_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
model_evaluation.ModelEvaluation()
@@ -5740,7 +5790,7 @@ def test_get_model_evaluation_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation()
@@ -5774,7 +5824,7 @@ async def test_get_model_evaluation_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_model_evaluation), "__call__"
+ type(client.transport.get_model_evaluation), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = model_evaluation.ModelEvaluation()
@@ -5819,7 +5869,7 @@ def test_list_model_evaluations(
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse(
@@ -5835,6 +5885,7 @@ def test_list_model_evaluations(
assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListModelEvaluationsPager)
assert response.next_page_token == "next_page_token_value"
@@ -5845,18 +5896,20 @@ def test_list_model_evaluations_from_dict():
@pytest.mark.asyncio
-async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"):
+async def test_list_model_evaluations_async(
+ transport: str = "grpc_asyncio", request_type=service.ListModelEvaluationsRequest
+):
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = service.ListModelEvaluationsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -5871,7 +5924,7 @@ async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == service.ListModelEvaluationsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListModelEvaluationsAsyncPager)
@@ -5879,6 +5932,11 @@ async def test_list_model_evaluations_async(transport: str = "grpc_asyncio"):
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_model_evaluations_async_from_dict():
+ await test_list_model_evaluations_async(request_type=dict)
+
+
def test_list_model_evaluations_field_headers():
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
@@ -5889,7 +5947,7 @@ def test_list_model_evaluations_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
call.return_value = service.ListModelEvaluationsResponse()
@@ -5916,7 +5974,7 @@ async def test_list_model_evaluations_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
service.ListModelEvaluationsResponse()
@@ -5939,7 +5997,7 @@ def test_list_model_evaluations_flattened():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse()
@@ -5973,7 +6031,7 @@ async def test_list_model_evaluations_flattened_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = service.ListModelEvaluationsResponse()
@@ -6010,7 +6068,7 @@ def test_list_model_evaluations_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -6056,7 +6114,7 @@ def test_list_model_evaluations_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_model_evaluations), "__call__"
+ type(client.transport.list_model_evaluations), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -6094,7 +6152,7 @@ async def test_list_model_evaluations_async_pager():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations),
+ type(client.transport.list_model_evaluations),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -6139,7 +6197,7 @@ async def test_list_model_evaluations_async_pages():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_model_evaluations),
+ type(client.transport.list_model_evaluations),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -6211,7 +6269,7 @@ def test_transport_instance():
credentials=credentials.AnonymousCredentials(),
)
client = AutoMlClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
@@ -6244,7 +6302,7 @@ def test_transport_adc(transport_class):
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = AutoMlClient(credentials=credentials.AnonymousCredentials(),)
- assert isinstance(client._transport, transports.AutoMlGrpcTransport,)
+ assert isinstance(client.transport, transports.AutoMlGrpcTransport,)
def test_auto_ml_base_transport_error():
@@ -6366,7 +6424,7 @@ def test_auto_ml_host_no_port():
api_endpoint="automl.googleapis.com"
),
)
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_auto_ml_host_with_port():
@@ -6376,7 +6434,7 @@ def test_auto_ml_host_with_port():
api_endpoint="automl.googleapis.com:8000"
),
)
- assert client._transport._host == "automl.googleapis.com:8000"
+ assert client.transport._host == "automl.googleapis.com:8000"
def test_auto_ml_grpc_transport_channel():
@@ -6388,6 +6446,7 @@ def test_auto_ml_grpc_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
def test_auto_ml_grpc_asyncio_transport_channel():
@@ -6399,6 +6458,7 @@ def test_auto_ml_grpc_asyncio_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
@@ -6441,6 +6501,7 @@ def test_auto_ml_transport_channel_mtls_with_client_cert_source(transport_class)
quota_project_id=None,
)
assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
@@ -6484,7 +6545,7 @@ def test_auto_ml_grpc_lro_client():
client = AutoMlClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
- transport = client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
@@ -6497,7 +6558,7 @@ def test_auto_ml_grpc_lro_async_client():
client = AutoMlAsyncClient(
credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
- transport = client._client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
@@ -6506,12 +6567,44 @@ def test_auto_ml_grpc_lro_async_client():
assert transport.operations_client is transport.operations_client
-def test_column_spec_path():
+def test_annotation_spec_path():
project = "squid"
location = "clam"
dataset = "whelk"
- table_spec = "octopus"
- column_spec = "oyster"
+ annotation_spec = "octopus"
+
+ expected = "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}".format(
+ project=project,
+ location=location,
+ dataset=dataset,
+ annotation_spec=annotation_spec,
+ )
+ actual = AutoMlClient.annotation_spec_path(
+ project, location, dataset, annotation_spec
+ )
+ assert expected == actual
+
+
+def test_parse_annotation_spec_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ "dataset": "cuttlefish",
+ "annotation_spec": "mussel",
+ }
+ path = AutoMlClient.annotation_spec_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_annotation_spec_path(path)
+ assert expected == actual
+
+
+def test_column_spec_path():
+ project = "winkle"
+ location = "nautilus"
+ dataset = "scallop"
+ table_spec = "abalone"
+ column_spec = "squid"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}/columnSpecs/{column_spec}".format(
project=project,
@@ -6528,11 +6621,11 @@ def test_column_spec_path():
def test_parse_column_spec_path():
expected = {
- "project": "nudibranch",
- "location": "cuttlefish",
- "dataset": "mussel",
- "table_spec": "winkle",
- "column_spec": "nautilus",
+ "project": "clam",
+ "location": "whelk",
+ "dataset": "octopus",
+ "table_spec": "oyster",
+ "column_spec": "nudibranch",
}
path = AutoMlClient.column_spec_path(**expected)
@@ -6542,9 +6635,9 @@ def test_parse_column_spec_path():
def test_dataset_path():
- project = "squid"
- location = "clam"
- dataset = "whelk"
+ project = "cuttlefish"
+ location = "mussel"
+ dataset = "winkle"
expected = "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
@@ -6555,9 +6648,9 @@ def test_dataset_path():
def test_parse_dataset_path():
expected = {
- "project": "octopus",
- "location": "oyster",
- "dataset": "nudibranch",
+ "project": "nautilus",
+ "location": "scallop",
+ "dataset": "abalone",
}
path = AutoMlClient.dataset_path(**expected)
@@ -6591,11 +6684,43 @@ def test_parse_model_path():
assert expected == actual
+def test_model_evaluation_path():
+ project = "cuttlefish"
+ location = "mussel"
+ model = "winkle"
+ model_evaluation = "nautilus"
+
+ expected = "projects/{project}/locations/{location}/models/{model}/modelEvaluations/{model_evaluation}".format(
+ project=project,
+ location=location,
+ model=model,
+ model_evaluation=model_evaluation,
+ )
+ actual = AutoMlClient.model_evaluation_path(
+ project, location, model, model_evaluation
+ )
+ assert expected == actual
+
+
+def test_parse_model_evaluation_path():
+ expected = {
+ "project": "scallop",
+ "location": "abalone",
+ "model": "squid",
+ "model_evaluation": "clam",
+ }
+ path = AutoMlClient.model_evaluation_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_model_evaluation_path(path)
+ assert expected == actual
+
+
def test_table_spec_path():
- project = "squid"
- location = "clam"
- dataset = "whelk"
- table_spec = "octopus"
+ project = "whelk"
+ location = "octopus"
+ dataset = "oyster"
+ table_spec = "nudibranch"
expected = "projects/{project}/locations/{location}/datasets/{dataset}/tableSpecs/{table_spec}".format(
project=project, location=location, dataset=dataset, table_spec=table_spec,
@@ -6606,10 +6731,10 @@ def test_table_spec_path():
def test_parse_table_spec_path():
expected = {
- "project": "oyster",
- "location": "nudibranch",
- "dataset": "cuttlefish",
- "table_spec": "mussel",
+ "project": "cuttlefish",
+ "location": "mussel",
+ "dataset": "winkle",
+ "table_spec": "nautilus",
}
path = AutoMlClient.table_spec_path(**expected)
@@ -6618,6 +6743,107 @@ def test_parse_table_spec_path():
assert expected == actual
+def test_common_billing_account_path():
+ billing_account = "scallop"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = AutoMlClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "abalone",
+ }
+ path = AutoMlClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "squid"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = AutoMlClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "clam",
+ }
+ path = AutoMlClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "whelk"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = AutoMlClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "octopus",
+ }
+ path = AutoMlClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "oyster"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = AutoMlClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "nudibranch",
+ }
+ path = AutoMlClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "cuttlefish"
+ location = "mussel"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = AutoMlClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "winkle",
+ "location": "nautilus",
+ }
+ path = AutoMlClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = AutoMlClient.parse_common_location_path(path)
+ assert expected == actual
+
+
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
diff --git a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
index 44c966c5..f41f1cdf 100644
--- a/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
+++ b/tests/unit/gapic/automl_v1beta1/test_prediction_service.py
@@ -109,12 +109,12 @@ def test_prediction_service_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_prediction_service_client_get_transport_class():
@@ -474,7 +474,7 @@ def test_predict(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
@@ -487,6 +487,7 @@ def test_predict(
assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, prediction_service.PredictResponse)
@@ -495,17 +496,19 @@ def test_predict_from_dict():
@pytest.mark.asyncio
-async def test_predict_async(transport: str = "grpc_asyncio"):
+async def test_predict_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.PredictRequest
+):
client = PredictionServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = prediction_service.PredictRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
@@ -517,12 +520,17 @@ async def test_predict_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == prediction_service.PredictRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, prediction_service.PredictResponse)
+@pytest.mark.asyncio
+async def test_predict_async_from_dict():
+ await test_predict_async(request_type=dict)
+
+
def test_predict_field_headers():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
@@ -532,7 +540,7 @@ def test_predict_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = prediction_service.PredictResponse()
client.predict(request)
@@ -559,7 +567,7 @@ async def test_predict_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
prediction_service.PredictResponse()
)
@@ -580,7 +588,7 @@ def test_predict_flattened():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
@@ -631,7 +639,7 @@ async def test_predict_flattened_async():
)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._client._transport.predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = prediction_service.PredictResponse()
@@ -693,7 +701,7 @@ def test_batch_predict(
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.batch_predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/spam")
@@ -714,19 +722,19 @@ def test_batch_predict_from_dict():
@pytest.mark.asyncio
-async def test_batch_predict_async(transport: str = "grpc_asyncio"):
+async def test_batch_predict_async(
+ transport: str = "grpc_asyncio", request_type=prediction_service.BatchPredictRequest
+):
client = PredictionServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = prediction_service.BatchPredictRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.batch_predict), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/spam")
@@ -738,12 +746,17 @@ async def test_batch_predict_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == prediction_service.BatchPredictRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, future.Future)
+@pytest.mark.asyncio
+async def test_batch_predict_async_from_dict():
+ await test_batch_predict_async(request_type=dict)
+
+
def test_batch_predict_field_headers():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
@@ -753,7 +766,7 @@ def test_batch_predict_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.batch_predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
call.return_value = operations_pb2.Operation(name="operations/op")
client.batch_predict(request)
@@ -780,9 +793,7 @@ async def test_batch_predict_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.batch_predict), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
operations_pb2.Operation(name="operations/op")
)
@@ -803,7 +814,7 @@ def test_batch_predict_flattened():
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.batch_predict), "__call__") as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -870,9 +881,7 @@ async def test_batch_predict_flattened_async():
)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.batch_predict), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.batch_predict), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = operations_pb2.Operation(name="operations/op")
@@ -974,7 +983,7 @@ def test_transport_instance():
credentials=credentials.AnonymousCredentials(),
)
client = PredictionServiceClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
@@ -1010,7 +1019,7 @@ def test_transport_adc(transport_class):
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = PredictionServiceClient(credentials=credentials.AnonymousCredentials(),)
- assert isinstance(client._transport, transports.PredictionServiceGrpcTransport,)
+ assert isinstance(client.transport, transports.PredictionServiceGrpcTransport,)
def test_prediction_service_base_transport_error():
@@ -1110,7 +1119,7 @@ def test_prediction_service_host_no_port():
api_endpoint="automl.googleapis.com"
),
)
- assert client._transport._host == "automl.googleapis.com:443"
+ assert client.transport._host == "automl.googleapis.com:443"
def test_prediction_service_host_with_port():
@@ -1120,7 +1129,7 @@ def test_prediction_service_host_with_port():
api_endpoint="automl.googleapis.com:8000"
),
)
- assert client._transport._host == "automl.googleapis.com:8000"
+ assert client.transport._host == "automl.googleapis.com:8000"
def test_prediction_service_grpc_transport_channel():
@@ -1132,6 +1141,7 @@ def test_prediction_service_grpc_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
def test_prediction_service_grpc_asyncio_transport_channel():
@@ -1143,6 +1153,7 @@ def test_prediction_service_grpc_asyncio_transport_channel():
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize(
@@ -1190,6 +1201,7 @@ def test_prediction_service_transport_channel_mtls_with_client_cert_source(
quota_project_id=None,
)
assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
@@ -1236,7 +1248,7 @@ def test_prediction_service_grpc_lro_client():
client = PredictionServiceClient(
credentials=credentials.AnonymousCredentials(), transport="grpc",
)
- transport = client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
@@ -1249,7 +1261,7 @@ def test_prediction_service_grpc_lro_async_client():
client = PredictionServiceAsyncClient(
credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
)
- transport = client._client._transport
+ transport = client.transport
# Ensure that we have a api-core operations client.
assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
@@ -1258,6 +1270,132 @@ def test_prediction_service_grpc_lro_async_client():
assert transport.operations_client is transport.operations_client
+def test_model_path():
+ project = "squid"
+ location = "clam"
+ model = "whelk"
+
+ expected = "projects/{project}/locations/{location}/models/{model}".format(
+ project=project, location=location, model=model,
+ )
+ actual = PredictionServiceClient.model_path(project, location, model)
+ assert expected == actual
+
+
+def test_parse_model_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "model": "nudibranch",
+ }
+ path = PredictionServiceClient.model_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_model_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = PredictionServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = PredictionServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = PredictionServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = PredictionServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = PredictionServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = PredictionServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = PredictionServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = PredictionServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = PredictionServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = PredictionServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = PredictionServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()