From c4ffea02fbc6c6566a4e772e2b353a5b4dc5b2fc Mon Sep 17 00:00:00 2001 From: arithmetic1728 <58957152+arithmetic1728@users.noreply.github.com> Date: Wed, 16 Sep 2020 13:16:31 -0700 Subject: [PATCH] feat: regenerate client lib to pick up new mtls env (#44) * feat: regenerate client lib to pick up new mtls env * update google-api-core and proto-plus version * fix package name --- .github/snippet-bot.yml | 0 .gitignore | 3 +- .kokoro/build.sh | 8 +- .kokoro/docker/docs/Dockerfile | 98 ++ .kokoro/docker/docs/fetch_gpg_keys.sh | 45 + .kokoro/docs/common.cfg | 21 +- .kokoro/docs/docs-presubmit.cfg | 17 + .kokoro/populate-secrets.sh | 43 + .kokoro/publish-docs.sh | 39 +- .kokoro/release/common.cfg | 50 +- .kokoro/trampoline.sh | 15 +- .kokoro/trampoline_v2.sh | 487 +++++++ .trampolinerc | 51 + docs/conf.py | 13 +- .../services/cluster_manager/async_client.py | 227 ++-- .../services/cluster_manager/client.py | 1173 +++++++++-------- .../cluster_manager/transports/base.py | 234 +++- .../cluster_manager/transports/grpc.py | 79 +- .../transports/grpc_asyncio.py | 76 +- .../container_v1/types/cluster_service.py | 4 +- .../services/cluster_manager/async_client.py | 249 ++-- .../services/cluster_manager/client.py | 1142 ++++++++-------- .../cluster_manager/transports/base.py | 255 +++- .../cluster_manager/transports/grpc.py | 79 +- .../transports/grpc_asyncio.py | 76 +- .../types/cluster_service.py | 10 +- noxfile.py | 39 + scripts/decrypt-secrets.sh | 15 +- scripts/fixup_container_v1_keywords.py | 1 + scripts/fixup_container_v1beta1_keywords.py | 1 + setup.py | 4 +- synth.metadata | 4 +- synth.py | 7 + tests/unit/gapic/container_v1/__init__.py | 1 + .../container_v1/test_cluster_manager.py | 1171 +++++++++++----- .../unit/gapic/container_v1beta1/__init__.py | 1 + .../container_v1beta1/test_cluster_manager.py | 1163 +++++++++++----- 37 files changed, 4867 insertions(+), 2034 deletions(-) create mode 100644 .github/snippet-bot.yml create mode 100644 .kokoro/docker/docs/Dockerfile create mode 100755 .kokoro/docker/docs/fetch_gpg_keys.sh create mode 100644 .kokoro/docs/docs-presubmit.cfg create mode 100755 .kokoro/populate-secrets.sh create mode 100755 .kokoro/trampoline_v2.sh create mode 100644 .trampolinerc diff --git a/.github/snippet-bot.yml b/.github/snippet-bot.yml new file mode 100644 index 00000000..e69de29b diff --git a/.gitignore b/.gitignore index b87e1ed5..b9daa52f 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,7 @@ pip-log.txt # Built documentation docs/_build bigquery/docs/generated +docs.metadata # Virtual environment env/ @@ -57,4 +58,4 @@ system_tests/local_test_setup # Make sure a generated file isn't accidentally committed. pylintrc -pylintrc.test \ No newline at end of file +pylintrc.test diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 15b8ca07..4b3b87c5 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation python3.6 -m pip install --upgrade --quiet nox python3.6 -m nox --version -python3.6 -m nox +# If NOX_SESSION is set, it only runs the specified session, +# otherwise run all the sessions. 
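+# Example (session name illustrative; see noxfile.py for the real ones):
+#   NOX_SESSION=unit-3.6 bash .kokoro/build.sh   # run a single session
+#   bash .kokoro/build.sh                        # run every session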
+if [[ -n "${NOX_SESSION:-}" ]]; then + python3.6 -m nox -s "${NOX_SESSION:-}" +else + python3.6 -m nox +fi diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile new file mode 100644 index 00000000..412b0b56 --- /dev/null +++ b/.kokoro/docker/docs/Dockerfile @@ -0,0 +1,98 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ubuntu:20.04 + +ENV DEBIAN_FRONTEND noninteractive + +# Ensure local Python is preferred over distribution Python. +ENV PATH /usr/local/bin:$PATH + +# Install dependencies. +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + dirmngr \ + git \ + gpg-agent \ + graphviz \ + libbz2-dev \ + libdb5.3-dev \ + libexpat1-dev \ + libffi-dev \ + liblzma-dev \ + libreadline-dev \ + libsnappy-dev \ + libssl-dev \ + libsqlite3-dev \ + portaudio19-dev \ + redis-server \ + software-properties-common \ + ssh \ + sudo \ + tcl \ + tcl-dev \ + tk \ + tk-dev \ + uuid-dev \ + wget \ + zlib1g-dev \ + && add-apt-repository universe \ + && apt-get update \ + && apt-get -y install jq \ + && apt-get clean autoclean \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && rm -f /var/cache/apt/archives/*.deb + + +COPY fetch_gpg_keys.sh /tmp +# Install the desired versions of Python. +RUN set -ex \ + && export GNUPGHOME="$(mktemp -d)" \ + && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \ + && /tmp/fetch_gpg_keys.sh \ + && for PYTHON_VERSION in 3.7.8 3.8.5; do \ + wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ + && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ + && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \ + && rm -r python-${PYTHON_VERSION}.tar.xz.asc \ + && mkdir -p /usr/src/python-${PYTHON_VERSION} \ + && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \ + && rm python-${PYTHON_VERSION}.tar.xz \ + && cd /usr/src/python-${PYTHON_VERSION} \ + && ./configure \ + --enable-shared \ + # This works only on Python 2.7 and throws a warning on every other + # version, but seems otherwise harmless. 
+ --enable-unicode=ucs4 \ + --with-system-ffi \ + --without-ensurepip \ + && make -j$(nproc) \ + && make install \ + && ldconfig \ + ; done \ + && rm -rf "${GNUPGHOME}" \ + && rm -rf /usr/src/python* \ + && rm -rf ~/.cache/ + +RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ + && python3.7 /tmp/get-pip.py \ + && python3.8 /tmp/get-pip.py \ + && rm /tmp/get-pip.py + +CMD ["python3.7"] diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh new file mode 100755 index 00000000..d653dd86 --- /dev/null +++ b/.kokoro/docker/docs/fetch_gpg_keys.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script to fetch gpg keys with retry. +# Avoid jinja parsing the file. +# + +function retry { + if [[ "${#}" -le 1 ]]; then + echo "Usage: ${0} retry_count commands.." + exit 1 + fi + local retries=${1} + local command="${@:2}" + until [[ "${retries}" -le 0 ]]; do + $command && return 0 + if [[ $? -ne 0 ]]; then + echo "command failed, retrying" + ((retries--)) + fi + done + return 1 +} + +# 3.6.9, 3.7.5 (Ned Deily) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + +# 3.8.0 (Ɓukasz Langa) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + E3FF2839C048B25C084DEBE9B26995E310250568 + +# diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg index def495fc..85990041 100644 --- a/.kokoro/docs/common.cfg +++ b/.kokoro/docs/common.cfg @@ -11,12 +11,12 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "python-container/.kokoro/trampoline.sh" +build_file: "python-container/.kokoro/trampoline_v2.sh" # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" + value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" } env_vars: { key: "TRAMPOLINE_BUILD_FILE" @@ -28,6 +28,23 @@ env_vars: { value: "docs-staging" } +env_vars: { + key: "V2_STAGING_BUCKET" + value: "docs-staging-v2-staging" +} + +# It will upload the docker image after successful builds. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "true" +} + +# It will always build the docker image. +env_vars: { + key: "TRAMPOLINE_DOCKERFILE" + value: ".kokoro/docker/docs/Dockerfile" +} + # Fetch the token needed for reporting release status to GitHub before_action { fetch_keystore { diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg new file mode 100644 index 00000000..11181078 --- /dev/null +++ b/.kokoro/docs/docs-presubmit.cfg @@ -0,0 +1,17 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "STAGING_BUCKET" + value: "gcloud-python-test" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "gcloud-python-test" +} + +# We only upload the image in the main `docs` build. 
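+# (Hedged reading: this file presumably inherits TRAMPOLINE_DOCKERFILE and
+# TRAMPOLINE_IMAGE from .kokoro/docs/common.cfg above, so presubmits still
+# build the image from .kokoro/docker/docs/Dockerfile but never push it;
+# only the main docs job, where TRAMPOLINE_IMAGE_UPLOAD is "true", uploads
+# the rebuilt gcr.io/cloud-devrel-kokoro-resources/python-lib-docs.)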
+env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "false" +} diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh new file mode 100755 index 00000000..f5251425 --- /dev/null +++ b/.kokoro/populate-secrets.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + + +# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: +# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com +SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + docker run --entrypoint=gcloud \ + --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ + gcr.io/google.com/cloudsdktool/cloud-sdk \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret ${key} > \ + "${SECRET_LOCATION}/${key}" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + fi +done diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index d3f611b3..8acb14e8 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -18,26 +18,16 @@ set -eo pipefail # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 -cd github/python-container - -# Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --user --upgrade --quiet nox +python3 -m nox --version # build docs nox -s docs -python3 -m pip install gcp-docuploader - -# install a json parser -sudo apt-get update -sudo apt-get -y install software-properties-common -sudo add-apt-repository universe -sudo apt-get update -sudo apt-get -y install jq +python3 -m pip install --user gcp-docuploader # create metadata python3 -m docuploader create-metadata \ @@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" + + +# docfx yaml files +nox -s docfx + +# create metadata. 
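+# (Note: create-metadata runs twice in this script — once before the HTML
+# upload above and again here — so docs.metadata is regenerated from
+# .repo-metadata.json and setup.py for the docfx upload below.)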
+python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index f5b39604..59633e6a 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -23,42 +23,18 @@ env_vars: { value: "github/python-container/.kokoro/release.sh" } -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - -# Fetch magictoken to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "releasetool-magictoken" - } - } +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } } -# Fetch api key to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "magic-github-proxy-api-key" - } - } -} +# Tokens needed to report release status back to GitHub +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} \ No newline at end of file diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index e8c4251f..f39236e9 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -15,9 +15,14 @@ set -eo pipefail -python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT -chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh -${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true - -exit ${ret_code} +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" \ No newline at end of file diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh new file mode 100755 index 00000000..719bcd5b --- /dev/null +++ b/.kokoro/trampoline_v2.sh @@ -0,0 +1,487 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# To run this script, first download few files from gcs to /dev/shm. +# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). +# +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# +# Then run the script. +# .kokoro/trampoline_v2.sh +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. + + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.5" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. +function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. 
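+# Example as used later in this script (output shape per log_impl above):
+#   log_red "Failed to build the Docker image, aborting."
+# prints the message in red between banner lines with a UTC RFC-3339
+# timestamp; per the log_impl comment, TERM=vt100 disables the color codes.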
+function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. + TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. 
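+        # (Hedged note: each variable listed here is forwarded into the
+        # container only if it is non-empty — see the loop over
+        # pass_down_envvars that appends "--env" flags near the end of
+        # this script.)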
+ "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For Build Cop Bot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. + "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. 
"github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. 
+ "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/.trampolinerc b/.trampolinerc new file mode 100644 index 00000000..995ee291 --- /dev/null +++ b/.trampolinerc @@ -0,0 +1,51 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Template for .trampolinerc + +# Add required env vars here. 
+required_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Add env vars which are passed down into the container here. +pass_down_envvars+=( + "STAGING_BUCKET" + "V2_STAGING_BUCKET" +) + +# Prevent unintentional override on the default image. +if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \ + [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image." + exit 1 +fi + +# Define the default value if it makes sense. +if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then + TRAMPOLINE_IMAGE_UPLOAD="" +fi + +if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + TRAMPOLINE_IMAGE="" +fi + +if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then + TRAMPOLINE_DOCKERFILE="" +fi + +if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then + TRAMPOLINE_BUILD_FILE="" +fi diff --git a/docs/conf.py b/docs/conf.py index e971bae7..7bb0b352 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,12 +20,16 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath("..")) +# For plugins that can not read conf.py. +# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85 +sys.path.insert(0, os.path.abspath(".")) + __version__ = "" # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" +needs_sphinx = "1.5.5" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom @@ -90,7 +94,12 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["_build"] +exclude_patterns = [ + "_build", + "samples/AUTHORING_GUIDE.md", + "samples/CONTRIBUTING.md", + "samples/snippets/README.rst", +] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/google/cloud/container_v1/services/cluster_manager/async_client.py b/google/cloud/container_v1/services/cluster_manager/async_client.py index 23b47e09..cfe4b916 100644 --- a/google/cloud/container_v1/services/cluster_manager/async_client.py +++ b/google/cloud/container_v1/services/cluster_manager/async_client.py @@ -31,7 +31,7 @@ from google.cloud.container_v1.services.cluster_manager import pagers from google.cloud.container_v1.types import cluster_service -from .transports.base import ClusterManagerTransport +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport from .client import ClusterManagerClient @@ -57,6 +57,7 @@ def __init__( credentials: credentials.Credentials = None, transport: Union[str, ClusterManagerTransport] = "grpc_asyncio", client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the cluster manager client. @@ -72,16 +73,19 @@ def __init__( client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint, this is the default value for - the environment variable) and "auto" (auto switch to the default - mTLS endpoint if client SSL credentials is present). However, - the ``api_endpoint`` property takes precedence if provided. - (2) The ``client_cert_source`` property is used to provide client - SSL credentials for mutual TLS transport. If not provided, the - default SSL credentials will be used if present. + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -89,7 +93,10 @@ def __init__( """ self._client = ClusterManagerClient( - credentials=credentials, transport=transport, client_options=client_options, + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, ) async def list_clusters( @@ -173,8 +180,16 @@ async def list_clusters( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_clusters, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -277,8 +292,16 @@ async def get_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_cluster, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -395,8 +418,8 @@ async def create_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_cluster, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -512,8 +535,8 @@ async def update_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_cluster, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -566,8 +589,8 @@ async def update_node_pool( # and friendly error handling. 
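        # (Pattern across this regeneration, noted once here: read-only
        # RPCs such as list_clusters gain a default_retry — exponential
        # backoff starting at 0.1s, multiplied by 1.3 per attempt and
        # capped at 60s, retrying only DeadlineExceeded and
        # ServiceUnavailable — with a 20s default timeout, while mutating
        # RPCs like this one get a flat 45s timeout and no automatic retry.)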
rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_node_pool, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -620,8 +643,8 @@ async def set_node_pool_autoscaling( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_node_pool_autoscaling, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -743,8 +766,8 @@ async def set_logging_service( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_logging_service, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -869,8 +892,8 @@ async def set_monitoring_service( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_monitoring_service, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -989,8 +1012,8 @@ async def set_addons_config( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_addons_config, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1113,8 +1136,8 @@ async def set_locations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_locations, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1244,8 +1267,8 @@ async def update_master( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_master, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1300,8 +1323,8 @@ async def set_master_auth( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_master_auth, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1417,8 +1440,16 @@ async def delete_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_cluster, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1504,8 +1535,16 @@ async def list_operations( # and friendly error handling. 
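        # (Callers can still override these defaults per call; a hedged
        # sketch, since the kwargs are not shown in this excerpt:
        #   await client.list_operations(request=req, timeout=5.0, retry=None)
        # would disable the default retry for a single invocation.)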
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_operations, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1601,8 +1640,16 @@ async def get_operation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_operation, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1700,8 +1747,8 @@ async def cancel_operation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_operation, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1794,8 +1841,16 @@ async def get_server_config( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_server_config, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1899,8 +1954,16 @@ async def list_node_pools( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_node_pools, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2024,8 +2087,16 @@ async def get_node_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_node_pool, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2141,8 +2212,8 @@ async def create_node_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_node_pool, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2261,8 +2332,16 @@ async def delete_node_pool( # and friendly error handling. 
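        # (Hedged inference: the delete RPCs receive the same retry policy
        # as the reads, presumably because retrying a delete of the same
        # resource is safe when the first attempt's outcome is unknown.)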
rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_node_pool, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2387,8 +2466,8 @@ async def rollback_node_pool_upgrade( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.rollback_node_pool_upgrade, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2440,8 +2519,8 @@ async def set_node_pool_management( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_node_pool_management, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2495,8 +2574,8 @@ async def set_labels( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_labels, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2613,8 +2692,8 @@ async def set_legacy_abac( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_legacy_abac, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2721,8 +2800,8 @@ async def start_ip_rotation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.start_ip_rotation, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2828,8 +2907,8 @@ async def complete_ip_rotation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.complete_ip_rotation, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2881,8 +2960,8 @@ async def set_node_pool_size( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_node_pool_size, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2999,8 +3078,8 @@ async def set_network_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_network_policy, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3114,8 +3193,8 @@ async def set_maintenance_policy( # and friendly error handling. 
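        # (These wrappers also switch client_info from the module-private
        # _client_info to the DEFAULT_CLIENT_INFO constant exported by
        # transports/base.py; the block at the end of this file fixes the
        # distribution name it reports, google-cloud-container rather than
        # google-container.)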
rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_maintenance_policy, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3173,7 +3252,7 @@ async def list_usable_subnetworks( rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_usable_subnetworks, default_timeout=None, - client_info=_client_info, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3196,11 +3275,11 @@ async def list_usable_subnetworks( try: - _client_info = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution("google-container",).version, + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-container",).version, ) except pkg_resources.DistributionNotFound: - _client_info = gapic_v1.client_info.ClientInfo() + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("ClusterManagerAsyncClient",) diff --git a/google/cloud/container_v1/services/cluster_manager/client.py b/google/cloud/container_v1/services/cluster_manager/client.py index 42e89bfb..3a0b8913 100644 --- a/google/cloud/container_v1/services/cluster_manager/client.py +++ b/google/cloud/container_v1/services/cluster_manager/client.py @@ -16,6 +16,7 @@ # from collections import OrderedDict +from distutils import util import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union @@ -27,13 +28,14 @@ from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.container_v1.services.cluster_manager import pagers from google.cloud.container_v1.types import cluster_service -from .transports.base import ClusterManagerTransport +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ClusterManagerGrpcTransport from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport @@ -134,6 +136,7 @@ def __init__( credentials: credentials.Credentials = None, transport: Union[str, ClusterManagerTransport] = None, client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the cluster manager client. @@ -149,16 +152,24 @@ def __init__( client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint, this is the default value for - the environment variable) and "auto" (auto switch to the default - mTLS endpoint if client SSL credentials is present). However, - the ``api_endpoint`` property takes precedence if provided. - (2) The ``client_cert_source`` property is used to provide client - SSL credentials for mutual TLS transport. 
If not provided, the - default SSL credentials will be used if present. + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -169,25 +180,43 @@ def __init__( if client_options is None: client_options = ClientOptions.ClientOptions() - if client_options.api_endpoint is None: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": - client_options.api_endpoint = self.DEFAULT_ENDPOINT + api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": - client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - has_client_cert_source = ( - client_options.client_cert_source is not None - or mtls.has_default_client_cert_source() - ) - client_options.api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT - if has_client_cert_source - else self.DEFAULT_ENDPOINT + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. @@ -211,10 +240,11 @@ def __init__( self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, - host=client_options.api_endpoint, + host=api_endpoint, scopes=client_options.scopes, - api_mtls_endpoint=client_options.api_endpoint, - client_cert_source=client_options.client_cert_source, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, ) def list_clusters( @@ -276,31 +306,33 @@ def list_clusters( # Create or coerce a protobuf request object. 
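        # (Both calling styles remain supported; values illustrative:
        #   client.list_clusters(project_id="my-proj", zone="us-central1-a")
        #   client.list_clusters(request=cluster_service.ListClustersRequest(
        #       parent="projects/my-proj/locations/us-central1-a"))
        # though mixing a request object with flattened arguments raises
        # ValueError, per the sanity check below.)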
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, parent]): + has_flattened_params = any([project_id, zone, parent]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListClustersRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListClustersRequest): + request = cluster_service.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if parent is not None: - request.parent = parent + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_clusters, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_clusters] # Certain fields should be provided within the metadata header; # add these here. @@ -378,31 +410,35 @@ def get_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, name]): + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetClusterRequest): + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
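        # (Hedged reading of the change below: rather than re-wrapping the
        # transport method on every call, the client now looks the callable
        # up in the transport's precomputed _wrapped_methods table, so the
        # retry/timeout defaults live in one place, transports/base.py.)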
- rpc = gapic_v1.method.wrap_method( - self._transport.get_cluster, default_timeout=None, client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -494,33 +530,35 @@ def create_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster, parent]): + has_flattened_params = any([project_id, zone, cluster, parent]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster is not None: - request.cluster = cluster - if parent is not None: - request.parent = parent + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateClusterRequest): + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.create_cluster, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.create_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -609,35 +647,37 @@ def update_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, update, name]): + has_flattened_params = any([project_id, zone, cluster_id, update, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.UpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if update is not None: - request.update = update - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, cluster_service.UpdateClusterRequest): + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.update_cluster, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.update_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -683,15 +723,16 @@ def update_node_pool( """ # Create or coerce a protobuf request object. - request = cluster_service.UpdateNodePoolRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateNodePoolRequest): + request = cluster_service.UpdateNodePoolRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.update_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.update_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -737,15 +778,18 @@ def set_node_pool_autoscaling( """ # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolAutoscalingRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolAutoscalingRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest): + request = cluster_service.SetNodePoolAutoscalingRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_node_pool_autoscaling, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[ + self._transport.set_node_pool_autoscaling + ] # Certain fields should be provided within the metadata header; # add these here. @@ -838,37 +882,39 @@ def set_logging_service( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( + has_flattened_params = any( [project_id, zone, cluster_id, logging_service, name] - ): + ) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLoggingServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if logging_service is not None: - request.logging_service = logging_service - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLoggingServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLoggingServiceRequest): + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_logging_service, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_logging_service] # Certain fields should be provided within the metadata header; # add these here. @@ -964,37 +1010,39 @@ def set_monitoring_service( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( + has_flattened_params = any( [project_id, zone, cluster_id, monitoring_service, name] - ): + ) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetMonitoringServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if monitoring_service is not None: - request.monitoring_service = monitoring_service - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMonitoringServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMonitoringServiceRequest): + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method.wrap_method( - self._transport.set_monitoring_service, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service] # Certain fields should be provided within the metadata header; # add these here. @@ -1084,37 +1132,37 @@ def set_addons_config( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, addons_config, name] - ): + has_flattened_params = any([project_id, zone, cluster_id, addons_config, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetAddonsConfigRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if addons_config is not None: - request.addons_config = addons_config - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetAddonsConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetAddonsConfigRequest): + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_addons_config, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_addons_config] # Certain fields should be provided within the metadata header; # add these here. @@ -1210,35 +1258,37 @@ def set_locations( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, locations, name]): + has_flattened_params = any([project_id, zone, cluster_id, locations, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLocationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if locations is not None: - request.locations = locations - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLocationsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLocationsRequest): + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_locations, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_locations] # Certain fields should be provided within the metadata header; # add these here. @@ -1339,37 +1389,37 @@ def update_master( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, master_version, name] - ): + has_flattened_params = any([project_id, zone, cluster_id, master_version, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.UpdateMasterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if master_version is not None: - request.master_version = master_version - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateMasterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateMasterRequest): + request = cluster_service.UpdateMasterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.update_master, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.update_master] # Certain fields should be provided within the metadata header; # add these here. @@ -1417,15 +1467,16 @@ def set_master_auth( """ # Create or coerce a protobuf request object. - request = cluster_service.SetMasterAuthRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMasterAuthRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, cluster_service.SetMasterAuthRequest): + request = cluster_service.SetMasterAuthRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_master_auth, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_master_auth] # Certain fields should be provided within the metadata header; # add these here. @@ -1516,33 +1567,35 @@ def delete_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, name]): + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.DeleteClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.DeleteClusterRequest): + request = cluster_service.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.delete_cluster, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -1607,29 +1660,31 @@ def list_operations( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone]): + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListOperationsRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListOperationsRequest): + request = cluster_service.ListOperationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_operations, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_operations] # Certain fields should be provided within the metadata header; # add these here. @@ -1702,31 +1757,33 @@ def get_operation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, operation_id]): + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetOperationRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetOperationRequest): + request = cluster_service.GetOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_operation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_operation] # Certain fields should be provided within the metadata header; # add these here. @@ -1799,33 +1856,35 @@ def cancel_operation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, operation_id, name]): + has_flattened_params = any([project_id, zone, operation_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CancelOperationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CancelOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, cluster_service.CancelOperationRequest): + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.cancel_operation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] # Certain fields should be provided within the metadata header; # add these here. @@ -1895,31 +1954,33 @@ def get_server_config( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, name]): + has_flattened_params = any([project_id, zone, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetServerConfigRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetServerConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetServerConfigRequest): + request = cluster_service.GetServerConfigRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if name is not None: - request.name = name + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_server_config, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_server_config] # Certain fields should be provided within the metadata header; # add these here. @@ -1998,33 +2059,35 @@ def list_node_pools( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, parent]): + has_flattened_params = any([project_id, zone, cluster_id, parent]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListNodePoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if parent is not None: - request.parent = parent + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListNodePoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListNodePoolsRequest): + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_node_pools, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_node_pools] # Certain fields should be provided within the metadata header; # add these here. @@ -2119,37 +2182,37 @@ def get_node_pool( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, node_pool_id, name] - ): + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetNodePoolRequest): + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -2236,37 +2299,37 @@ def create_node_pool( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any( - [project_id, zone, cluster_id, node_pool, parent] - ): + has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CreateNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool is not None: - request.node_pool = node_pool - if parent is not None: - request.parent = parent + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateNodePoolRequest): + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.create_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.create_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -2356,37 +2419,37 @@ def delete_node_pool( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, node_pool_id, name] - ): + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.DeleteNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.DeleteNodePoolRequest): + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.delete_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.delete_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -2482,37 +2545,39 @@ def rollback_node_pool_upgrade( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, node_pool_id, name] - ): + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.RollbackNodePoolUpgradeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.RollbackNodePoolUpgradeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest): + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.rollback_node_pool_upgrade, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[ + self._transport.rollback_node_pool_upgrade + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2557,15 +2622,16 @@ def set_node_pool_management( """ # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolManagementRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolManagementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolManagementRequest): + request = cluster_service.SetNodePoolManagementRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method.wrap_method( - self._transport.set_node_pool_management, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management] # Certain fields should be provided within the metadata header; # add these here. @@ -2612,13 +2678,16 @@ def set_labels( """ # Create or coerce a protobuf request object. - request = cluster_service.SetLabelsRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLabelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLabelsRequest): + request = cluster_service.SetLabelsRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_labels, default_timeout=None, client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_labels] # Certain fields should be provided within the metadata header; # add these here. @@ -2708,35 +2777,37 @@ def set_legacy_abac( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, enabled, name]): + has_flattened_params = any([project_id, zone, cluster_id, enabled, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLegacyAbacRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if enabled is not None: - request.enabled = enabled - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLegacyAbacRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLegacyAbacRequest): + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_legacy_abac, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac] # Certain fields should be provided within the metadata header; # add these here. @@ -2818,33 +2889,35 @@ def start_ip_rotation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([project_id, zone, cluster_id, name]): + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.StartIPRotationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.StartIPRotationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.StartIPRotationRequest): + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.start_ip_rotation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation] # Certain fields should be provided within the metadata header; # add these here. @@ -2925,33 +2998,35 @@ def complete_ip_rotation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, name]): + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CompleteIPRotationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CompleteIPRotationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CompleteIPRotationRequest): + request = cluster_service.CompleteIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method.wrap_method( - self._transport.complete_ip_rotation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation] # Certain fields should be provided within the metadata header; # add these here. @@ -2996,15 +3071,16 @@ def set_node_pool_size( """ # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolSizeRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolSizeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolSizeRequest): + request = cluster_service.SetNodePoolSizeRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_node_pool_size, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size] # Certain fields should be provided within the metadata header; # add these here. @@ -3092,37 +3168,37 @@ def set_network_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, network_policy, name] - ): + has_flattened_params = any([project_id, zone, cluster_id, network_policy, name]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetNetworkPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if network_policy is not None: - request.network_policy = network_policy - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNetworkPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNetworkPolicyRequest): + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_network_policy, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_network_policy] # Certain fields should be provided within the metadata header; # add these here. @@ -3207,37 +3283,39 @@ def set_maintenance_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any( + has_flattened_params = any( [project_id, zone, cluster_id, maintenance_policy, name] - ): + ) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetMaintenancePolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if maintenance_policy is not None: - request.maintenance_policy = maintenance_policy - if name is not None: - request.name = name + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMaintenancePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMaintenancePolicyRequest): + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + if name is not None: + request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_maintenance_policy, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy] # Certain fields should be provided within the metadata header; # add these here. @@ -3287,15 +3365,16 @@ def list_usable_subnetworks( """ # Create or coerce a protobuf request object. - request = cluster_service.ListUsableSubnetworksRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListUsableSubnetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListUsableSubnetworksRequest): + request = cluster_service.ListUsableSubnetworksRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_usable_subnetworks, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks] # Certain fields should be provided within the metadata header; # add these here. 
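# --- [editor's note] Illustrative sketch; not part of the generated patch. --
# What the recurring "metadata header" step after each wrapped RPC amounts to:
# routing fields from the request are folded into the x-goog-request-params
# gRPC header via gapic_v1.routing_header. All values below are hypothetical;
# credentials come from application defaults.
from google.api_core import gapic_v1
from google.cloud.container_v1 import ClusterManagerClient

routing_metadata = gapic_v1.routing_header.to_grpc_metadata(
    (("project_id", "my-project"), ("zone", "us-central1-a"))
)
# -> ("x-goog-request-params", "project_id=my-project&zone=us-central1-a")

# Caller-supplied metadata is appended alongside the routing header.
client = ClusterManagerClient()
response = client.list_usable_subnetworks(
    request={"parent": "projects/my-project"},
    metadata=(("x-custom-debug-id", "example"),),
)
# ----------------------------------------------------------------------------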
@@ -3317,11 +3396,11 @@ def list_usable_subnetworks(


 try:
-    _client_info = gapic_v1.client_info.ClientInfo(
-        gapic_version=pkg_resources.get_distribution("google-container",).version,
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-container",).version,
     )
 except pkg_resources.DistributionNotFound:
-    _client_info = gapic_v1.client_info.ClientInfo()
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


 __all__ = ("ClusterManagerClient",)
diff --git a/google/cloud/container_v1/services/cluster_manager/transports/base.py b/google/cloud/container_v1/services/cluster_manager/transports/base.py
index 06635873..73b54881 100644
--- a/google/cloud/container_v1/services/cluster_manager/transports/base.py
+++ b/google/cloud/container_v1/services/cluster_manager/transports/base.py
@@ -17,15 +17,26 @@
 import abc
 import typing
+import pkg_resources

-from google import auth
+from google import auth  # type: ignore
 from google.api_core import exceptions  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import retry as retries  # type: ignore
 from google.auth import credentials  # type: ignore

 from google.cloud.container_v1.types import cluster_service
 from google.protobuf import empty_pb2 as empty  # type: ignore

+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution("google-cloud-container",).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
 class ClusterManagerTransport(abc.ABC):
     """Abstract transport class for ClusterManager."""

@@ -38,6 +49,8 @@ def __init__(
         credentials: credentials.Credentials = None,
         credentials_file: typing.Optional[str] = None,
         scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+        quota_project_id: typing.Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
         **kwargs,
     ) -> None:
         """Instantiate the transport.
@@ -53,6 +66,13 @@ def __init__(
             be loaded with :func:`google.auth.load_credentials_from_file`.
             This argument is mutually exclusive with credentials.
         scopes (Optional[Sequence[str]]): A list of scopes.
+        quota_project_id (Optional[str]): An optional project to use for billing
+            and quota.
+        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+            The client info used to send a user-agent string along with
+            API requests. If ``None``, then default info will be used.
+            Generally, you only need to set this if you're developing
+            your own client library.
         """
         # Save the hostname. Default to port 443 (HTTPS) if none is specified.
         if ":" not in host:
@@ -68,14 +88,222 @@ def __init__(

         if credentials_file is not None:
             credentials, _ = auth.load_credentials_from_file(
-                credentials_file, scopes=scopes
+                credentials_file, scopes=scopes, quota_project_id=quota_project_id
             )
+
         elif credentials is None:
-            credentials, _ = auth.default(scopes=scopes)
+            credentials, _ = auth.default(
+                scopes=scopes, quota_project_id=quota_project_id
+            )

         # Save the credentials.
         self._credentials = credentials

+        # Lifted into its own function so it can be stubbed out during tests.
+        self._prep_wrapped_messages(client_info)
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+ self._wrapped_methods = { + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, default_timeout=45.0, client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, default_timeout=45.0, client_info=client_info, + ), + self.update_node_pool: gapic_v1.method.wrap_method( + self.update_node_pool, default_timeout=45.0, client_info=client_info, + ), + self.set_node_pool_autoscaling: gapic_v1.method.wrap_method( + self.set_node_pool_autoscaling, + default_timeout=45.0, + client_info=client_info, + ), + self.set_logging_service: gapic_v1.method.wrap_method( + self.set_logging_service, default_timeout=45.0, client_info=client_info, + ), + self.set_monitoring_service: gapic_v1.method.wrap_method( + self.set_monitoring_service, + default_timeout=45.0, + client_info=client_info, + ), + self.set_addons_config: gapic_v1.method.wrap_method( + self.set_addons_config, default_timeout=45.0, client_info=client_info, + ), + self.set_locations: gapic_v1.method.wrap_method( + self.set_locations, default_timeout=45.0, client_info=client_info, + ), + self.update_master: gapic_v1.method.wrap_method( + self.update_master, default_timeout=45.0, client_info=client_info, + ), + self.set_master_auth: gapic_v1.method.wrap_method( + self.set_master_auth, default_timeout=45.0, client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, default_timeout=45.0, client_info=client_info, + ), + self.get_server_config: gapic_v1.method.wrap_method( + self.get_server_config, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_node_pools: gapic_v1.method.wrap_method( + self.list_node_pools, + default_retry=retries.Retry( + initial=0.1, + 
maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_node_pool: gapic_v1.method.wrap_method( + self.get_node_pool, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_node_pool: gapic_v1.method.wrap_method( + self.create_node_pool, default_timeout=45.0, client_info=client_info, + ), + self.delete_node_pool: gapic_v1.method.wrap_method( + self.delete_node_pool, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.DeadlineExceeded, exceptions.ServiceUnavailable, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method( + self.rollback_node_pool_upgrade, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_management: gapic_v1.method.wrap_method( + self.set_node_pool_management, + default_timeout=45.0, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, default_timeout=45.0, client_info=client_info, + ), + self.set_legacy_abac: gapic_v1.method.wrap_method( + self.set_legacy_abac, default_timeout=45.0, client_info=client_info, + ), + self.start_ip_rotation: gapic_v1.method.wrap_method( + self.start_ip_rotation, default_timeout=45.0, client_info=client_info, + ), + self.complete_ip_rotation: gapic_v1.method.wrap_method( + self.complete_ip_rotation, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_size: gapic_v1.method.wrap_method( + self.set_node_pool_size, default_timeout=45.0, client_info=client_info, + ), + self.set_network_policy: gapic_v1.method.wrap_method( + self.set_network_policy, default_timeout=45.0, client_info=client_info, + ), + self.set_maintenance_policy: gapic_v1.method.wrap_method( + self.set_maintenance_policy, + default_timeout=45.0, + client_info=client_info, + ), + self.list_usable_subnetworks: gapic_v1.method.wrap_method( + self.list_usable_subnetworks, + default_timeout=None, + client_info=client_info, + ), + } + @property def list_clusters( self, diff --git a/google/cloud/container_v1/services/cluster_manager/transports/grpc.py b/google/cloud/container_v1/services/cluster_manager/transports/grpc.py index a9eb88e5..c738e64a 100644 --- a/google/cloud/container_v1/services/cluster_manager/transports/grpc.py +++ b/google/cloud/container_v1/services/cluster_manager/transports/grpc.py @@ -15,20 +15,21 @@ # limitations under the License. 
 #
+import warnings
 from typing import Callable, Dict, Optional, Sequence, Tuple

 from google.api_core import grpc_helpers  # type: ignore
+from google.api_core import gapic_v1  # type: ignore
 from google import auth  # type: ignore
 from google.auth import credentials  # type: ignore
 from google.auth.transport.grpc import SslCredentials  # type: ignore
-
 import grpc  # type: ignore

 from google.cloud.container_v1.types import cluster_service
 from google.protobuf import empty_pb2 as empty  # type: ignore

-from .base import ClusterManagerTransport
+from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO


 class ClusterManagerGrpcTransport(ClusterManagerTransport):
@@ -55,7 +56,10 @@ def __init__(
         scopes: Sequence[str] = None,
         channel: grpc.Channel = None,
         api_mtls_endpoint: str = None,
-        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id: Optional[str] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiate the transport.

@@ -74,14 +78,23 @@ def __init__(
             ignored if ``channel`` is provided.
             channel (Optional[grpc.Channel]): A ``Channel`` instance through
                 which to make calls.
-            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
-                provided, it overrides the ``host`` argument and tries to create
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
                 a mutual TLS channel with client SSL credentials from
                 ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
-                callback to provide client SSL certificate bytes and private key
-                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
-                is None.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.

         Raises:
             google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -97,6 +110,11 @@ def __init__(
             # If a channel was explicitly provided, set it.
             self._grpc_channel = channel
         elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
             host = (
                 api_mtls_endpoint
                 if ":" in api_mtls_endpoint
@@ -104,7 +122,9 @@ def __init__(
             )

             if credentials is None:
-                credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )

             # Create SSL credentials with client_cert_source or application
             # default SSL credentials.
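
The deprecation above pairs with the new ``ssl_channel_credentials`` argument: callers hand the transport ready-made channel credentials instead of the old ``api_mtls_endpoint``/``client_cert_source`` pair. A minimal sketch of the new construction path, not part of this patch; the PEM paths and quota project are hypothetical, and ``auth.default()`` is assumed to resolve Application Default Credentials:

    import grpc

    from google.cloud.container_v1.services.cluster_manager.transports.grpc import (
        ClusterManagerGrpcTransport,
    )

    # Hypothetical PEM files holding the client certificate and private key.
    with open("client_cert.pem", "rb") as f:
        cert = f.read()
    with open("client_key.pem", "rb") as f:
        key = f.read()

    transport = ClusterManagerGrpcTransport(
        host="container.mtls.googleapis.com",  # the service's mTLS endpoint
        ssl_channel_credentials=grpc.ssl_channel_credentials(
            certificate_chain=cert, private_key=key
        ),
        quota_project_id="my-quota-project",  # hypothetical quota project
    )

An explicit ``channel`` still takes precedence over every other option, and the deprecated arguments keep working but now emit a ``DeprecationWarning``, as the hunk below shows.
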
@@ -123,7 +143,27 @@ def __init__(
                 credentials_file=credentials_file,
                 ssl_credentials=ssl_credentials,
                 scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
             )
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+
+        self._stubs = {}  # type: Dict[str, Callable]

         # Run the base constructor.
         super().__init__(
@@ -131,10 +171,10 @@ def __init__(
             credentials=credentials,
             credentials_file=credentials_file,
             scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
         )

-        self._stubs = {}  # type: Dict[str, Callable]
-
     @classmethod
     def create_channel(
         cls,
@@ -142,7 +182,8 @@ def create_channel(
         credentials: credentials.Credentials = None,
         credentials_file: str = None,
         scopes: Optional[Sequence[str]] = None,
-        **kwargs
+        quota_project_id: Optional[str] = None,
+        **kwargs,
     ) -> grpc.Channel:
         """Create and return a gRPC channel object.
         Args:
@@ -158,6 +199,8 @@ def create_channel(
             scopes (Optional[Sequence[str]]): An optional list of scopes needed for
                 this service. These are only used when credentials are not
                 specified and are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
             kwargs (Optional[dict]): Keyword arguments, which are passed to the
                 channel creation.
         Returns:
@@ -173,7 +216,8 @@ def create_channel(
             credentials=credentials,
             credentials_file=credentials_file,
             scopes=scopes,
-            **kwargs
+            quota_project_id=quota_project_id,
+            **kwargs,
         )

     @property
@@ -183,13 +227,6 @@ def grpc_channel(self) -> grpc.Channel:
         This property caches on the instance; repeated calls return
         the same channel.
         """
-        # Sanity check: Only create a new channel if we do not already
-        # have one.
-        if not hasattr(self, "_grpc_channel"):
-            self._grpc_channel = self.create_channel(
-                self._host, credentials=self._credentials,
-            )
-
         # Return the channel from cache.
         return self._grpc_channel

diff --git a/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py b/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py
index add48e49..e53f51f9 100644
--- a/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py
+++ b/google/cloud/container_v1/services/cluster_manager/transports/grpc_asyncio.py
@@ -15,9 +15,12 @@
 # limitations under the License.
 #
+import warnings
 from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple

+from google.api_core import gapic_v1  # type: ignore
 from google.api_core import grpc_helpers_async  # type: ignore
+from google import auth  # type: ignore
 from google.auth import credentials  # type: ignore
 from google.auth.transport.grpc import SslCredentials  # type: ignore

@@ -27,7 +30,7 @@
 from google.cloud.container_v1.types import cluster_service
 from google.protobuf import empty_pb2 as empty  # type: ignore

-from .base import ClusterManagerTransport
+from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO
 from .grpc import ClusterManagerGrpcTransport


@@ -54,7 +57,8 @@ def create_channel(
         credentials: credentials.Credentials = None,
         credentials_file: Optional[str] = None,
         scopes: Optional[Sequence[str]] = None,
-        **kwargs
+        quota_project_id: Optional[str] = None,
+        **kwargs,
     ) -> aio.Channel:
         """Create and return a gRPC AsyncIO channel object.
         Args:
@@ -70,6 +74,8 @@ def create_channel(
             scopes (Optional[Sequence[str]]): An optional list of scopes needed for
                 this service. These are only used when credentials are not
                 specified and are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
             kwargs (Optional[dict]): Keyword arguments, which are passed to the
                 channel creation.
         Returns:
@@ -81,7 +87,8 @@ def create_channel(
             credentials=credentials,
             credentials_file=credentials_file,
             scopes=scopes,
-            **kwargs
+            quota_project_id=quota_project_id,
+            **kwargs,
         )

     def __init__(
@@ -93,7 +100,10 @@ def __init__(
         scopes: Optional[Sequence[str]] = None,
         channel: aio.Channel = None,
         api_mtls_endpoint: str = None,
-        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+        ssl_channel_credentials: grpc.ChannelCredentials = None,
+        quota_project_id=None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
     ) -> None:
         """Instantiate the transport.

@@ -113,14 +123,23 @@ def __init__(
             are passed to :func:`google.auth.default`.
             channel (Optional[aio.Channel]): A ``Channel`` instance through
                 which to make calls.
-            api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
-                provided, it overrides the ``host`` argument and tries to create
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
                 a mutual TLS channel with client SSL credentials from
                 ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
-                callback to provide client SSL certificate bytes and private key
-                bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
-                is None.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
         Raises:
             google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
@@ -136,12 +155,22 @@ def __init__(
             # If a channel was explicitly provided, set it.
             self._grpc_channel = channel
         elif api_mtls_endpoint:
+            warnings.warn(
+                "api_mtls_endpoint and client_cert_source are deprecated",
+                DeprecationWarning,
+            )
+
             host = (
                 api_mtls_endpoint
                 if ":" in api_mtls_endpoint
                 else api_mtls_endpoint + ":443"
             )

+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
             # Create SSL credentials with client_cert_source or application
             # default SSL credentials.
             if client_cert_source:
@@ -159,6 +188,24 @@ def __init__(
                 credentials_file=credentials_file,
                 ssl_credentials=ssl_credentials,
                 scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
+            )
+        else:
+            host = host if ":" in host else host + ":443"
+
+            if credentials is None:
+                credentials, _ = auth.default(
+                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+                )
+
+            # create a new channel. The provided one is ignored.
+            self._grpc_channel = type(self).create_channel(
+                host,
+                credentials=credentials,
+                credentials_file=credentials_file,
+                ssl_credentials=ssl_channel_credentials,
+                scopes=scopes or self.AUTH_SCOPES,
+                quota_project_id=quota_project_id,
             )

         # Run the base constructor.
@@ -167,6 +214,8 @@ def __init__(
             credentials=credentials,
             credentials_file=credentials_file,
             scopes=scopes or self.AUTH_SCOPES,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
         )

         self._stubs = {}

@@ -178,13 +227,6 @@ def grpc_channel(self) -> aio.Channel:
         This property caches on the instance; repeated calls return
         the same channel.
         """
-        # Sanity check: Only create a new channel if we do not already
-        # have one.
-        if not hasattr(self, "_grpc_channel"):
-            self._grpc_channel = self.create_channel(
-                self._host, credentials=self._credentials,
-            )
-
         # Return the channel from cache.
return self._grpc_channel diff --git a/google/cloud/container_v1/types/cluster_service.py b/google/cloud/container_v1/types/cluster_service.py index 6ae45902..01054685 100644 --- a/google/cloud/container_v1/types/cluster_service.py +++ b/google/cloud/container_v1/types/cluster_service.py @@ -2499,11 +2499,11 @@ class MaintenanceWindow(proto.Message): """ daily_maintenance_window = proto.Field( - proto.MESSAGE, number=2, message="DailyMaintenanceWindow", + proto.MESSAGE, number=2, oneof="policy", message="DailyMaintenanceWindow", ) recurring_window = proto.Field( - proto.MESSAGE, number=3, message="RecurringTimeWindow", + proto.MESSAGE, number=3, oneof="policy", message="RecurringTimeWindow", ) maintenance_exclusions = proto.MapField( diff --git a/google/cloud/container_v1beta1/services/cluster_manager/async_client.py b/google/cloud/container_v1beta1/services/cluster_manager/async_client.py index 38d50668..5767ac07 100644 --- a/google/cloud/container_v1beta1/services/cluster_manager/async_client.py +++ b/google/cloud/container_v1beta1/services/cluster_manager/async_client.py @@ -31,7 +31,7 @@ from google.cloud.container_v1beta1.services.cluster_manager import pagers from google.cloud.container_v1beta1.types import cluster_service -from .transports.base import ClusterManagerTransport +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport from .client import ClusterManagerClient @@ -57,6 +57,7 @@ def __init__( credentials: credentials.Credentials = None, transport: Union[str, ClusterManagerTransport] = "grpc_asyncio", client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the cluster manager client. @@ -72,16 +73,19 @@ def __init__( client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint, this is the default value for - the environment variable) and "auto" (auto switch to the default - mTLS endpoint if client SSL credentials is present). However, - the ``api_endpoint`` property takes precedence if provided. - (2) The ``client_cert_source`` property is used to provide client - SSL credentials for mutual TLS transport. If not provided, the - default SSL credentials will be used if present. + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -89,7 +93,10 @@ def __init__( """ self._client = ClusterManagerClient( - credentials=credentials, transport=transport, client_options=client_options, + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, ) async def list_clusters( @@ -163,8 +170,16 @@ async def list_clusters( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_clusters, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -258,8 +273,16 @@ async def get_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_cluster, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -367,8 +390,8 @@ async def create_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_cluster, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -475,8 +498,8 @@ async def update_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_cluster, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -529,8 +552,8 @@ async def update_node_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_node_pool, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -583,8 +606,8 @@ async def set_node_pool_autoscaling( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_node_pool_autoscaling, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -695,8 +718,8 @@ async def set_logging_service( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_logging_service, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -809,8 +832,8 @@ async def set_monitoring_service( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_monitoring_service, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -918,8 +941,8 @@ async def set_addons_config( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_addons_config, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1033,8 +1056,8 @@ async def set_locations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_locations, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1153,8 +1176,8 @@ async def update_master( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_master, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1209,8 +1232,8 @@ async def set_master_auth( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_master_auth, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1317,8 +1340,16 @@ async def delete_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_cluster, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1405,8 +1436,16 @@ async def list_operations( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_operations, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1503,8 +1542,16 @@ async def get_operation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_operation, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1593,8 +1640,8 @@ async def cancel_operation( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.cancel_operation, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1679,8 +1726,16 @@ async def get_server_config( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_server_config, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1775,8 +1830,16 @@ async def list_node_pools( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_node_pools, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1890,8 +1953,16 @@ async def get_node_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_node_pool, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -1996,8 +2067,8 @@ async def create_node_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.create_node_pool, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2105,8 +2176,16 @@ async def delete_node_pool( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.delete_node_pool, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2219,8 +2298,8 @@ async def rollback_node_pool_upgrade( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.rollback_node_pool_upgrade, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2340,8 +2419,8 @@ async def set_node_pool_management( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_node_pool_management, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2471,8 +2550,8 @@ async def set_labels( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_labels, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2580,8 +2659,8 @@ async def set_legacy_abac( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_legacy_abac, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2679,8 +2758,8 @@ async def start_ip_rotation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.start_ip_rotation, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2777,8 +2856,8 @@ async def complete_ip_rotation( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.complete_ip_rotation, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2830,8 +2909,8 @@ async def set_node_pool_size( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_node_pool_size, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -2937,8 +3016,8 @@ async def set_network_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_network_policy, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3042,8 +3121,8 @@ async def set_maintenance_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.set_maintenance_policy, - default_timeout=None, - client_info=_client_info, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3120,8 +3199,16 @@ async def list_usable_subnetworks( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_usable_subnetworks, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3199,8 +3286,16 @@ async def list_locations( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_locations, - default_timeout=None, - client_info=_client_info, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; @@ -3217,11 +3312,11 @@ async def list_locations( try: - _client_info = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution("google-container",).version, + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-container",).version, ) except pkg_resources.DistributionNotFound: - _client_info = gapic_v1.client_info.ClientInfo() + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("ClusterManagerAsyncClient",) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/client.py b/google/cloud/container_v1beta1/services/cluster_manager/client.py index 58dffe37..afd63ed9 100644 --- a/google/cloud/container_v1beta1/services/cluster_manager/client.py +++ b/google/cloud/container_v1beta1/services/cluster_manager/client.py @@ -16,6 +16,7 @@ # from collections import OrderedDict +from distutils import util import os import re from typing import Callable, Dict, Sequence, Tuple, Type, Union @@ -27,13 +28,14 @@ from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore from google.auth.exceptions import MutualTLSChannelError # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.container_v1beta1.services.cluster_manager import pagers from google.cloud.container_v1beta1.types import cluster_service -from .transports.base import ClusterManagerTransport +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .transports.grpc import ClusterManagerGrpcTransport from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport @@ -134,6 +136,7 @@ def __init__( credentials: credentials.Credentials = None, transport: Union[str, ClusterManagerTransport] = None, client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the cluster manager client. @@ -149,16 +152,24 @@ def __init__( client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint, this is the default value for - the environment variable) and "auto" (auto switch to the default - mTLS endpoint if client SSL credentials is present). However, - the ``api_endpoint`` property takes precedence if provided. - (2) The ``client_cert_source`` property is used to provide client - SSL credentials for mutual TLS transport. If not provided, the - default SSL credentials will be used if present. 
+ use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -169,25 +180,43 @@ def __init__( if client_options is None: client_options = ClientOptions.ClientOptions() - if client_options.api_endpoint is None: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never") + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool( + util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")) + ) + + ssl_credentials = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + import grpc # type: ignore + + cert, key = client_options.client_cert_source() + ssl_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + is_mtls = True + else: + creds = SslCredentials() + is_mtls = creds.is_mtls + ssl_credentials = creds.ssl_credentials if is_mtls else None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") if use_mtls_env == "never": - client_options.api_endpoint = self.DEFAULT_ENDPOINT + api_endpoint = self.DEFAULT_ENDPOINT elif use_mtls_env == "always": - client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT + api_endpoint = self.DEFAULT_MTLS_ENDPOINT elif use_mtls_env == "auto": - has_client_cert_source = ( - client_options.client_cert_source is not None - or mtls.has_default_client_cert_source() - ) - client_options.api_endpoint = ( - self.DEFAULT_MTLS_ENDPOINT - if has_client_cert_source - else self.DEFAULT_ENDPOINT + api_endpoint = ( + self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT ) else: raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always" + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always" ) # Save or instantiate the transport. @@ -211,10 +240,11 @@ def __init__( self._transport = Transport( credentials=credentials, credentials_file=client_options.credentials_file, - host=client_options.api_endpoint, + host=api_endpoint, scopes=client_options.scopes, - api_mtls_endpoint=client_options.api_endpoint, - client_cert_source=client_options.client_cert_source, + ssl_channel_credentials=ssl_credentials, + quota_project_id=client_options.quota_project_id, + client_info=client_info, ) def list_clusters( @@ -268,29 +298,31 @@ def list_clusters( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([project_id, zone]): + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListClustersRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListClustersRequest): + request = cluster_service.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_clusters, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_clusters] # Certain fields should be provided within the metadata header; # add these here. @@ -361,29 +393,33 @@ def get_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id]): + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetClusterRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetClusterRequest): + request = cluster_service.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_cluster, default_timeout=None, client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -468,31 +504,33 @@ def create_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([project_id, zone, cluster]): + has_flattened_params = any([project_id, zone, cluster]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CreateClusterRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateClusterRequest): + request = cluster_service.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster is not None: - request.cluster = cluster + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.create_cluster, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.create_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -574,33 +612,35 @@ def update_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, update]): + has_flattened_params = any([project_id, zone, cluster_id, update]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.UpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if update is not None: - request.update = update + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateClusterRequest): + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.update_cluster, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.update_cluster] # Certain fields should be provided within the metadata header; # add these here. 
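
The pattern repeated through these client hunks replaces the per-call ``gapic_v1.method.wrap_method(...)`` with the ``_wrapped_methods`` table precomputed on the transport, and copies ``request`` only when it is not already the right proto type. A short sketch of the two calling conventions the new guard enforces, not part of this patch; the project and zone values are hypothetical, and working default credentials are assumed:

    from google.cloud.container_v1beta1 import ClusterManagerClient
    from google.cloud.container_v1beta1.types import cluster_service

    client = ClusterManagerClient()

    # 1) Flattened arguments: the client builds the ListClustersRequest itself.
    response = client.list_clusters(project_id="my-project", zone="us-central1-a")

    # 2) A prebuilt request object: passed through without copying, since the
    #    isinstance() check replaces the unconditional re-wrap.
    request = cluster_service.ListClustersRequest(
        project_id="my-project", zone="us-central1-a"
    )
    response = client.list_clusters(request=request)

    # Mixing the two styles raises ValueError via the has_flattened_params check.

Retry and timeout defaults now come from ``_prep_wrapped_messages`` on the transport; per-call ``retry=`` and ``timeout=`` arguments still override them.
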
@@ -646,15 +686,16 @@ def update_node_pool( """ # Create or coerce a protobuf request object. - request = cluster_service.UpdateNodePoolRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateNodePoolRequest): + request = cluster_service.UpdateNodePoolRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.update_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.update_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -700,15 +741,18 @@ def set_node_pool_autoscaling( """ # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolAutoscalingRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolAutoscalingRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest): + request = cluster_service.SetNodePoolAutoscalingRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_node_pool_autoscaling, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[ + self._transport.set_node_pool_autoscaling + ] # Certain fields should be provided within the metadata header; # add these here. @@ -794,33 +838,35 @@ def set_logging_service( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, logging_service]): + has_flattened_params = any([project_id, zone, cluster_id, logging_service]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLoggingServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if logging_service is not None: - request.logging_service = logging_service + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLoggingServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLoggingServiceRequest): + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_logging_service, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_logging_service] # Certain fields should be provided within the metadata header; # add these here. @@ -906,35 +952,35 @@ def set_monitoring_service( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( - [project_id, zone, cluster_id, monitoring_service] - ): + has_flattened_params = any([project_id, zone, cluster_id, monitoring_service]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetMonitoringServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if monitoring_service is not None: - request.monitoring_service = monitoring_service + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMonitoringServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMonitoringServiceRequest): + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_monitoring_service, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service] # Certain fields should be provided within the metadata header; # add these here. @@ -1017,33 +1063,35 @@ def set_addons_config( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, addons_config]): + has_flattened_params = any([project_id, zone, cluster_id, addons_config]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetAddonsConfigRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if addons_config is not None: - request.addons_config = addons_config + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetAddonsConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetAddonsConfigRequest): + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_addons_config, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_addons_config] # Certain fields should be provided within the metadata header; # add these here. @@ -1132,33 +1180,35 @@ def set_locations( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, locations]): + has_flattened_params = any([project_id, zone, cluster_id, locations]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLocationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if locations is not None: - request.locations = locations + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLocationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLocationsRequest): + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_locations, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_locations] # Certain fields should be provided within the metadata header; # add these here. @@ -1252,33 +1302,35 @@ def update_master( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([project_id, zone, cluster_id, master_version]): + has_flattened_params = any([project_id, zone, cluster_id, master_version]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.UpdateMasterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if master_version is not None: - request.master_version = master_version + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateMasterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateMasterRequest): + request = cluster_service.UpdateMasterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.update_master, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.update_master] # Certain fields should be provided within the metadata header; # add these here. @@ -1326,15 +1378,16 @@ def set_master_auth( """ # Create or coerce a protobuf request object. - request = cluster_service.SetMasterAuthRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMasterAuthRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMasterAuthRequest): + request = cluster_service.SetMasterAuthRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_master_auth, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_master_auth] # Certain fields should be provided within the metadata header; # add these here. @@ -1418,31 +1471,33 @@ def delete_cluster( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id]): + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.DeleteClusterRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, cluster_service.DeleteClusterRequest): + request = cluster_service.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.delete_cluster, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] # Certain fields should be provided within the metadata header; # add these here. @@ -1508,29 +1563,31 @@ def list_operations( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone]): + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListOperationsRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListOperationsRequest): + request = cluster_service.ListOperationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_operations, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_operations] # Certain fields should be provided within the metadata header; # add these here. @@ -1604,31 +1661,33 @@ def get_operation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, operation_id]): + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetOperationRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, cluster_service.GetOperationRequest): + request = cluster_service.GetOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_operation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_operation] # Certain fields should be provided within the metadata header; # add these here. @@ -1694,31 +1753,33 @@ def cancel_operation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, operation_id]): + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CancelOperationRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CancelOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CancelOperationRequest): + request = cluster_service.CancelOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.cancel_operation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] # Certain fields should be provided within the metadata header; # add these here. @@ -1782,29 +1843,31 @@ def get_server_config( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone]): + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetServerConfigRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetServerConfigRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetServerConfigRequest): + request = cluster_service.GetServerConfigRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_server_config, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_server_config] # Certain fields should be provided within the metadata header; # add these here. @@ -1876,31 +1939,33 @@ def list_node_pools( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id]): + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListNodePoolsRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListNodePoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListNodePoolsRequest): + request = cluster_service.ListNodePoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_node_pools, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_node_pools] # Certain fields should be provided within the metadata header; # add these here. @@ -1989,33 +2054,35 @@ def get_node_pool( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.GetNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetNodePoolRequest): + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.get_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.get_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -2095,33 +2162,35 @@ def create_node_pool( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, node_pool]): + has_flattened_params = any([project_id, zone, cluster_id, node_pool]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CreateNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool is not None: - request.node_pool = node_pool + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateNodePoolRequest): + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.create_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.create_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -2204,33 +2273,35 @@ def delete_node_pool( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.DeleteNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.DeleteNodePoolRequest): + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.delete_node_pool, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.delete_node_pool] # Certain fields should be provided within the metadata header; # add these here. @@ -2318,33 +2389,37 @@ def rollback_node_pool_upgrade( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, node_pool_id]): + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.RollbackNodePoolUpgradeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.RollbackNodePoolUpgradeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest): + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method.wrap_method( - self._transport.rollback_node_pool_upgrade, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[ + self._transport.rollback_node_pool_upgrade + ] # Certain fields should be provided within the metadata header; # add these here. @@ -2435,37 +2510,39 @@ def set_node_pool_management( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( + has_flattened_params = any( [project_id, zone, cluster_id, node_pool_id, management] - ): + ) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetNodePoolManagementRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if management is not None: - request.management = management + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolManagementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolManagementRequest): + request = cluster_service.SetNodePoolManagementRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if management is not None: + request.management = management # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_node_pool_management, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management] # Certain fields should be provided within the metadata header; # add these here. @@ -2566,35 +2643,39 @@ def set_labels( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any( + has_flattened_params = any( [project_id, zone, cluster_id, resource_labels, label_fingerprint] - ): + ) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLabelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
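# The _wrapped_methods lookups introduced above replace per-call wrapping.
# Before this change, every invocation re-ran roughly the following:
#
#     rpc = gapic_v1.method.wrap_method(
#         self._transport.set_labels,
#         default_timeout=None,
#         client_info=_client_info,
#     )
#
# wrap_method() layers retry, timeout, and user-agent metadata around the
# bare gRPC stub. Precomputing the wrapped callables once per transport
# (see _prep_wrapped_messages further down) avoids rebuilding that wrapper
# on every call, and lets each method carry the retry/timeout defaults
# from the service config instead of default_timeout=None.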
- - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if resource_labels is not None: - request.resource_labels = resource_labels - if label_fingerprint is not None: - request.label_fingerprint = label_fingerprint + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLabelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLabelsRequest): + request = cluster_service.SetLabelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if resource_labels is not None: + request.resource_labels = resource_labels + if label_fingerprint is not None: + request.label_fingerprint = label_fingerprint # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_labels, default_timeout=None, client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_labels] # Certain fields should be provided within the metadata header; # add these here. @@ -2677,33 +2758,35 @@ def set_legacy_abac( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, enabled]): + has_flattened_params = any([project_id, zone, cluster_id, enabled]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetLegacyAbacRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if enabled is not None: - request.enabled = enabled + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLegacyAbacRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLegacyAbacRequest): + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_legacy_abac, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac] # Certain fields should be provided within the metadata header; # add these here. @@ -2778,31 +2861,33 @@ def start_ip_rotation( # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id]): + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.StartIPRotationRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.StartIPRotationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.StartIPRotationRequest): + request = cluster_service.StartIPRotationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.start_ip_rotation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation] # Certain fields should be provided within the metadata header; # add these here. @@ -2876,31 +2961,33 @@ def complete_ip_rotation( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id]): + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.CompleteIPRotationRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CompleteIPRotationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CompleteIPRotationRequest): + request = cluster_service.CompleteIPRotationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. 
- rpc = gapic_v1.method.wrap_method( - self._transport.complete_ip_rotation, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation] # Certain fields should be provided within the metadata header; # add these here. @@ -2945,15 +3032,16 @@ def set_node_pool_size( """ # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolSizeRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolSizeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolSizeRequest): + request = cluster_service.SetNodePoolSizeRequest(request) # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_node_pool_size, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size] # Certain fields should be provided within the metadata header; # add these here. @@ -3034,33 +3122,35 @@ def set_network_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([project_id, zone, cluster_id, network_policy]): + has_flattened_params = any([project_id, zone, cluster_id, network_policy]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetNetworkPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if network_policy is not None: - request.network_policy = network_policy + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNetworkPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNetworkPolicyRequest): + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_network_policy, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_network_policy] # Certain fields should be provided within the metadata header; # add these here. @@ -3137,35 +3227,35 @@ def set_maintenance_policy( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. 
- if request is not None and any( - [project_id, zone, cluster_id, maintenance_policy] - ): + has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.SetMaintenancePolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if maintenance_policy is not None: - request.maintenance_policy = maintenance_policy + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMaintenancePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMaintenancePolicyRequest): + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.set_maintenance_policy, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy] # Certain fields should be provided within the metadata header; # add these here. @@ -3223,27 +3313,29 @@ def list_usable_subnetworks( # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([parent]): + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListUsableSubnetworksRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListUsableSubnetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListUsableSubnetworksRequest): + request = cluster_service.ListUsableSubnetworksRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if parent is not None: - request.parent = parent + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_usable_subnetworks, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks] # Certain fields should be provided within the metadata header; # add these here. @@ -3302,27 +3394,29 @@ def list_locations( # Create or coerce a protobuf request object. 
# Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. - if request is not None and any([parent]): + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." ) - request = cluster_service.ListLocationsRequest(request) + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListLocationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListLocationsRequest): + request = cluster_service.ListLocationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. + # If we have keyword arguments corresponding to fields on the + # request, apply these. - if parent is not None: - request.parent = parent + if parent is not None: + request.parent = parent # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. - rpc = gapic_v1.method.wrap_method( - self._transport.list_locations, - default_timeout=None, - client_info=_client_info, - ) + rpc = self._transport._wrapped_methods[self._transport.list_locations] # Certain fields should be provided within the metadata header; # add these here. @@ -3338,11 +3432,11 @@ def list_locations( try: - _client_info = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution("google-container",).version, + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-container",).version, ) except pkg_resources.DistributionNotFound: - _client_info = gapic_v1.client_info.ClientInfo() + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() __all__ = ("ClusterManagerClient",) diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py index 18d6e7c3..975e5f7c 100644 --- a/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/base.py @@ -17,15 +17,26 @@ import abc import typing +import pkg_resources -from google import auth +from google import auth # type: ignore from google.api_core import exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore from google.auth import credentials # type: ignore from google.cloud.container_v1beta1.types import cluster_service from google.protobuf import empty_pb2 as empty # type: ignore +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution("google-cloud-container",).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + class ClusterManagerTransport(abc.ABC): """Abstract transport class for ClusterManager.""" @@ -38,6 +49,8 @@ def __init__( credentials: credentials.Credentials = None, credentials_file: typing.Optional[str] = None, scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES, + quota_project_id: typing.Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, **kwargs, ) -> None: """Instantiate the transport. 
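# The DEFAULT_CLIENT_INFO defined above feeds the x-goog-api-client /
# user-agent metadata sent with every request. A sketch of overriding it
# from a wrapper library; the version string is a placeholder, and the
# client is assumed to forward client_info to its transport via the new
# constructor parameter shown in this patch:
from google.api_core.gapic_v1.client_info import ClientInfo
from google.cloud.container_v1beta1.services.cluster_manager import (
    ClusterManagerClient,
)

client = ClusterManagerClient(
    client_info=ClientInfo(client_library_version="my-wrapper/0.1.0"),
)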
@@ -53,6 +66,13 @@ def __init__( be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scope (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. """ # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: @@ -68,14 +88,243 @@ def __init__( if credentials_file is not None: credentials, _ = auth.load_credentials_from_file( - credentials_file, scopes=scopes + credentials_file, scopes=scopes, quota_project_id=quota_project_id ) + elif credentials is None: - credentials, _ = auth.default(scopes=scopes) + credentials, _ = auth.default( + scopes=scopes, quota_project_id=quota_project_id + ) # Save the credentials. self._credentials = credentials + # Lifted into its own function so it can be stubbed out during tests. + self._prep_wrapped_messages(client_info) + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, default_timeout=45.0, client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, default_timeout=45.0, client_info=client_info, + ), + self.update_node_pool: gapic_v1.method.wrap_method( + self.update_node_pool, default_timeout=45.0, client_info=client_info, + ), + self.set_node_pool_autoscaling: gapic_v1.method.wrap_method( + self.set_node_pool_autoscaling, + default_timeout=45.0, + client_info=client_info, + ), + self.set_logging_service: gapic_v1.method.wrap_method( + self.set_logging_service, default_timeout=45.0, client_info=client_info, + ), + self.set_monitoring_service: gapic_v1.method.wrap_method( + self.set_monitoring_service, + default_timeout=45.0, + client_info=client_info, + ), + self.set_addons_config: gapic_v1.method.wrap_method( + self.set_addons_config, default_timeout=45.0, client_info=client_info, + ), + self.set_locations: gapic_v1.method.wrap_method( + self.set_locations, default_timeout=45.0, client_info=client_info, + ), + self.update_master: gapic_v1.method.wrap_method( + self.update_master, default_timeout=45.0, client_info=client_info, + ), + self.set_master_auth: gapic_v1.method.wrap_method( + self.set_master_auth, default_timeout=45.0, client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, 
exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, default_timeout=45.0, client_info=client_info, + ), + self.get_server_config: gapic_v1.method.wrap_method( + self.get_server_config, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_node_pools: gapic_v1.method.wrap_method( + self.list_node_pools, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_node_pool: gapic_v1.method.wrap_method( + self.get_node_pool, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_node_pool: gapic_v1.method.wrap_method( + self.create_node_pool, default_timeout=45.0, client_info=client_info, + ), + self.delete_node_pool: gapic_v1.method.wrap_method( + self.delete_node_pool, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method( + self.rollback_node_pool_upgrade, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_management: gapic_v1.method.wrap_method( + self.set_node_pool_management, + default_timeout=45.0, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, default_timeout=45.0, client_info=client_info, + ), + self.set_legacy_abac: gapic_v1.method.wrap_method( + self.set_legacy_abac, default_timeout=45.0, client_info=client_info, + ), + self.start_ip_rotation: gapic_v1.method.wrap_method( + self.start_ip_rotation, default_timeout=45.0, client_info=client_info, + ), + self.complete_ip_rotation: gapic_v1.method.wrap_method( + self.complete_ip_rotation, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_size: gapic_v1.method.wrap_method( + self.set_node_pool_size, default_timeout=45.0, client_info=client_info, + ), + self.set_network_policy: gapic_v1.method.wrap_method( + self.set_network_policy, default_timeout=45.0, client_info=client_info, + ), + self.set_maintenance_policy: gapic_v1.method.wrap_method( + self.set_maintenance_policy, + default_timeout=45.0, + client_info=client_info, + ), + 
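# A note on the Retry policy repeated throughout this table: with
# initial=0.1, multiplier=1.3, and maximum=60.0, the delay between
# attempts grows roughly 0.1s, 0.13s, 0.169s, 0.22s, ... (multiplied by
# 1.3 each time, capped at 60s), and only ServiceUnavailable and
# DeadlineExceeded trigger another attempt; any other error propagates
# immediately. default_timeout is the timeout applied when the caller
# does not pass one explicitly: 20.0s for the idempotent reads above,
# 45.0s for the mutations.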
self.list_usable_subnetworks: gapic_v1.method.wrap_method( + self.list_usable_subnetworks, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_locations: gapic_v1.method.wrap_method( + self.list_locations, + default_retry=retries.Retry( + initial=0.1, + maximum=60.0, + multiplier=1.3, + predicate=retries.if_exception_type( + exceptions.ServiceUnavailable, exceptions.DeadlineExceeded, + ), + ), + default_timeout=20.0, + client_info=client_info, + ), + } + @property def list_clusters( self, diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py index f1914169..f03d06b8 100644 --- a/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc.py @@ -15,20 +15,21 @@ # limitations under the License. # +import warnings from typing import Callable, Dict, Optional, Sequence, Tuple from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore from google import auth # type: ignore from google.auth import credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore - import grpc # type: ignore from google.cloud.container_v1beta1.types import cluster_service from google.protobuf import empty_pb2 as empty # type: ignore -from .base import ClusterManagerTransport +from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO class ClusterManagerGrpcTransport(ClusterManagerTransport): @@ -55,7 +56,10 @@ def __init__( scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. @@ -74,14 +78,23 @@ def __init__( ignored if ``channel`` is provided. channel (Optional[grpc.Channel]): A ``Channel`` instance through which to make calls. - api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If - provided, it overrides the ``host`` argument and tries to create + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A - callback to provide client SSL certificate bytes and private key - bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` - is None. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. Raises: google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport @@ -97,6 +110,11 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + host = ( api_mtls_endpoint if ":" in api_mtls_endpoint @@ -104,7 +122,9 @@ def __init__( ) if credentials is None: - credentials, _ = auth.default(scopes=self.AUTH_SCOPES) + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) # Create SSL credentials with client_cert_source or application # default SSL credentials. @@ -123,7 +143,27 @@ def __init__( credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + + self._stubs = {} # type: Dict[str, Callable] # Run the base constructor. super().__init__( @@ -131,10 +171,10 @@ def __init__( credentials=credentials, credentials_file=credentials_file, scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, ) - self._stubs = {} # type: Dict[str, Callable] - @classmethod def create_channel( cls, @@ -142,7 +182,8 @@ def create_channel( credentials: credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, - **kwargs + quota_project_id: Optional[str] = None, + **kwargs, ) -> grpc.Channel: """Create and return a gRPC channel object. Args: @@ -158,6 +199,8 @@ def create_channel( scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: @@ -173,7 +216,8 @@ def create_channel( credentials=credentials, credentials_file=credentials_file, scopes=scopes, - **kwargs + quota_project_id=quota_project_id, + **kwargs, ) @property @@ -183,13 +227,6 @@ def grpc_channel(self) -> grpc.Channel: This property caches on the instance; repeated calls return the same channel. """ - # Sanity check: Only create a new channel if we do not already - # have one. - if not hasattr(self, "_grpc_channel"): - self._grpc_channel = self.create_channel( - self._host, credentials=self._credentials, - ) - # Return the channel from cache. 
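# A sketch of the new ssl_channel_credentials path shown above, which
# supersedes the deprecated api_mtls_endpoint / client_cert_source pair.
# The PEM paths and the mTLS hostname are placeholders:
import grpc
from google.cloud.container_v1beta1.services.cluster_manager import (
    ClusterManagerClient,
    transports,
)

with open("client_cert.pem", "rb") as cert_file:
    cert = cert_file.read()
with open("client_key.pem", "rb") as key_file:
    key = key_file.read()

transport = transports.ClusterManagerGrpcTransport(
    host="container.mtls.googleapis.com",
    ssl_channel_credentials=grpc.ssl_channel_credentials(
        certificate_chain=cert, private_key=key
    ),
)
client = ClusterManagerClient(transport=transport)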
return self._grpc_channel diff --git a/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py index a53feaa3..cca74eaa 100644 --- a/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py +++ b/google/cloud/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py @@ -15,9 +15,12 @@ # limitations under the License. # +import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple +from google.api_core import gapic_v1 # type: ignore from google.api_core import grpc_helpers_async # type: ignore +from google import auth # type: ignore from google.auth import credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore @@ -27,7 +30,7 @@ from google.cloud.container_v1beta1.types import cluster_service from google.protobuf import empty_pb2 as empty # type: ignore -from .base import ClusterManagerTransport +from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO from .grpc import ClusterManagerGrpcTransport @@ -54,7 +57,8 @@ def create_channel( credentials: credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - **kwargs + quota_project_id: Optional[str] = None, + **kwargs, ) -> aio.Channel: """Create and return a gRPC AsyncIO channel object. Args: @@ -70,6 +74,8 @@ def create_channel( scopes (Optional[Sequence[str]]): A optional list of scopes needed for this service. These are only used when credentials are not specified and are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. kwargs (Optional[dict]): Keyword arguments, which are passed to the channel creation. Returns: @@ -81,7 +87,8 @@ def create_channel( credentials=credentials, credentials_file=credentials_file, scopes=scopes, - **kwargs + quota_project_id=quota_project_id, + **kwargs, ) def __init__( @@ -93,7 +100,10 @@ def __init__( scopes: Optional[Sequence[str]] = None, channel: aio.Channel = None, api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiate the transport. @@ -113,14 +123,23 @@ def __init__( are passed to :func:`google.auth.default`. channel (Optional[aio.Channel]): A ``Channel`` instance through which to make calls. - api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If - provided, it overrides the ``host`` argument and tries to create + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create a mutual TLS channel with client SSL credentials from ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A - callback to provide client SSL certificate bytes and private key - bytes, both in PEM format. It is ignored if ``api_mtls_endpoint`` - is None. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. 
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport @@ -136,12 +155,22 @@ def __init__( # If a channel was explicitly provided, set it. self._grpc_channel = channel elif api_mtls_endpoint: + warnings.warn( + "api_mtls_endpoint and client_cert_source are deprecated", + DeprecationWarning, + ) + host = ( api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443" ) + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + # Create SSL credentials with client_cert_source or application # default SSL credentials. if client_cert_source: @@ -159,6 +188,24 @@ def __init__( credentials_file=credentials_file, ssl_credentials=ssl_credentials, scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + ) + else: + host = host if ":" in host else host + ":443" + + if credentials is None: + credentials, _ = auth.default( + scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id + ) + + # create a new channel. The provided one is ignored. + self._grpc_channel = type(self).create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + ssl_credentials=ssl_channel_credentials, + scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, ) # Run the base constructor. @@ -167,6 +214,8 @@ def __init__( credentials=credentials, credentials_file=credentials_file, scopes=scopes or self.AUTH_SCOPES, + quota_project_id=quota_project_id, + client_info=client_info, ) self._stubs = {} @@ -178,13 +227,6 @@ def grpc_channel(self) -> aio.Channel: This property caches on the instance; repeated calls return the same channel. """ - # Sanity check: Only create a new channel if we do not already - # have one. - if not hasattr(self, "_grpc_channel"): - self._grpc_channel = self.create_channel( - self._host, credentials=self._credentials, - ) - # Return the channel from cache. 
return self._grpc_channel diff --git a/google/cloud/container_v1beta1/types/cluster_service.py b/google/cloud/container_v1beta1/types/cluster_service.py index 5f7d60e8..545933c2 100644 --- a/google/cloud/container_v1beta1/types/cluster_service.py +++ b/google/cloud/container_v1beta1/types/cluster_service.py @@ -1565,11 +1565,11 @@ class Metric(proto.Message): name = proto.Field(proto.STRING, number=1) - int_value = proto.Field(proto.INT64, number=2) + int_value = proto.Field(proto.INT64, number=2, oneof="value") - double_value = proto.Field(proto.DOUBLE, number=3) + double_value = proto.Field(proto.DOUBLE, number=3, oneof="value") - string_value = proto.Field(proto.STRING, number=4) + string_value = proto.Field(proto.STRING, number=4, oneof="value") name = proto.Field(proto.STRING, number=1) @@ -2653,11 +2653,11 @@ class MaintenanceWindow(proto.Message): """ daily_maintenance_window = proto.Field( - proto.MESSAGE, number=2, message="DailyMaintenanceWindow", + proto.MESSAGE, number=2, oneof="policy", message="DailyMaintenanceWindow", ) recurring_window = proto.Field( - proto.MESSAGE, number=3, message="RecurringTimeWindow", + proto.MESSAGE, number=3, oneof="policy", message="RecurringTimeWindow", ) maintenance_exclusions = proto.MapField( diff --git a/noxfile.py b/noxfile.py index 9608c849..810c9541 100644 --- a/noxfile.py +++ b/noxfile.py @@ -102,6 +102,10 @@ def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") + + # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true. + if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false": + session.skip("RUN_SYSTEM_TESTS is set to false, skipping") # Sanity check: Only run tests if the environment variable is set. if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): session.skip("Credentials must be set via environment variable") @@ -162,3 +166,38 @@ def docs(session): os.path.join("docs", ""), os.path.join("docs", "_build", "html", ""), ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docfx(session): + """Build the docfx yaml files for this library.""" + + session.install("-e", ".") + # sphinx-docfx-yaml supports up to sphinx version 1.5.5. + # https://github.com/docascode/sphinx-docfx-yaml/issues/97 + session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-T", # show full traceback on exception + "-N", # no colors + "-D", + ( + "extensions=sphinx.ext.autodoc," + "sphinx.ext.autosummary," + "docfx_yaml.extension," + "sphinx.ext.intersphinx," + "sphinx.ext.coverage," + "sphinx.ext.napoleon," + "sphinx.ext.todo," + "sphinx.ext.viewcode," + "recommonmark" + ), + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index ff599eb2..21f6d2a2 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" ) # Work from the project root. cd $ROOT +# Prevent it from overriding files. +# We recommend that sample authors use their own service account files and cloud project. +# In that case, they are supposed to prepare these files by themselves. 
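# A short sketch of the oneof semantics added to cluster_service above for
# Metric (oneof="value") and MaintenanceWindow (oneof="policy"): fields that
# share a oneof clear one another, so only the most recently set member
# survives (standard proto3 behavior, assuming proto-plus >= 1.4.0).
from google.cloud.container_v1beta1.types import cluster_service

metric = cluster_service.Metric(name="nodes", int_value=3)
metric.string_value = "three"   # switching the oneof clears int_value
assert metric.int_value == 0    # reset to the proto3 default
assert metric.string_value == "three"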
+if [[ -f "testing/test-env.sh" ]] || \ + [[ -f "testing/service-account.json" ]] || \ + [[ -f "testing/client-secrets.json" ]]; then + echo "One or more target files exist, aborting." + exit 1 +fi + # Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources. PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}" gcloud secrets versions access latest --secret="python-docs-samples-test-env" \ + --project="${PROJECT_ID}" \ > testing/test-env.sh gcloud secrets versions access latest \ --secret="python-docs-samples-service-account" \ + --project="${PROJECT_ID}" \ > testing/service-account.json gcloud secrets versions access latest \ --secret="python-docs-samples-client-secrets" \ - > testing/client-secrets.json \ No newline at end of file + --project="${PROJECT_ID}" \ + > testing/client-secrets.json diff --git a/scripts/fixup_container_v1_keywords.py b/scripts/fixup_container_v1_keywords.py index 8bb63c78..1c87db60 100644 --- a/scripts/fixup_container_v1_keywords.py +++ b/scripts/fixup_container_v1_keywords.py @@ -71,6 +71,7 @@ class containerCallTransformer(cst.CSTTransformer): 'update_cluster': ('update', 'project_id', 'zone', 'cluster_id', 'name', ), 'update_master': ('master_version', 'project_id', 'zone', 'cluster_id', 'name', ), 'update_node_pool': ('node_version', 'image_type', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/scripts/fixup_container_v1beta1_keywords.py b/scripts/fixup_container_v1beta1_keywords.py index 61c23309..0c1ba06e 100644 --- a/scripts/fixup_container_v1beta1_keywords.py +++ b/scripts/fixup_container_v1beta1_keywords.py @@ -72,6 +72,7 @@ class containerCallTransformer(cst.CSTTransformer): 'update_cluster': ('project_id', 'zone', 'cluster_id', 'update', 'name', ), 'update_master': ('project_id', 'zone', 'cluster_id', 'master_version', 'name', ), 'update_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_version', 'image_type', 'workload_metadata_config', 'name', ), + } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/setup.py b/setup.py index 78c73583..bcc7b09b 100644 --- a/setup.py +++ b/setup.py @@ -29,9 +29,9 @@ # 'Development Status :: 5 - Production/Stable' release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.21.0, < 2.0.0dev", + "google-api-core[grpc] >= 1.21.2, < 2.0.0dev", "grpc-google-iam-v1 >= 0.12.3, < 0.13dev", - "proto-plus >= 0.4.0", + "proto-plus >= 1.4.0", "libcst >= 0.2.5", ] extras = {} diff --git a/synth.metadata b/synth.metadata index 602ab60c..8e1254cb 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,14 +4,14 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/python-container.git", - "sha": "761f12af56bc24306b874c5fc60a8c958232a252" + "sha": "e9e9e85f3177009648c1f77e242b6925b1f0428e" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "2a5693326b1e708ea8464b4cb06ea2894691c365" + "sha": "dba48bb9bc6959c232bec9150ac6313b608fe7bd" } } ], diff --git a/synth.py b/synth.py index 0b0c79d7..2ca4a663 100644 --- a/synth.py +++ b/synth.py @@ -63,6 +63,13 @@ f"google.cloud.container_{version}", ) +# Fix package name +s.replace( + "google/cloud/**/*.py", + "google-container", + "google-cloud-container" +) + # Issues exist where python files should define the source encoding # 
https://github.com/googleapis/gapic-generator/issues/2097 s.replace( diff --git a/tests/unit/gapic/container_v1/__init__.py b/tests/unit/gapic/container_v1/__init__.py index e69de29b..8b137891 100644 --- a/tests/unit/gapic/container_v1/__init__.py +++ b/tests/unit/gapic/container_v1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/container_v1/test_cluster_manager.py b/tests/unit/gapic/container_v1/test_cluster_manager.py index edcf004c..ff61986a 100644 --- a/tests/unit/gapic/container_v1/test_cluster_manager.py +++ b/tests/unit/gapic/container_v1/test_cluster_manager.py @@ -22,6 +22,7 @@ from grpc.experimental import aio import math import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule from google import auth from google.api_core import client_options @@ -44,6 +45,17 @@ def client_cert_source_callback(): return b"cert bytes", b"key bytes" +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -110,6 +122,16 @@ def test_cluster_manager_client_get_transport_class(): ), ], ) +@mock.patch.object( + ClusterManagerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerClient), +) +@mock.patch.object( + ClusterManagerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerAsyncClient), +) def test_cluster_manager_client_client_options( client_class, transport_class, transport_name ): @@ -134,103 +156,207 @@ def test_cluster_manager_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - api_mtls_endpoint="squid.clam.whelk", - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". - os.environ["GOOGLE_API_USE_MTLS"] = "never" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is - # "always". - os.environ["GOOGLE_API_USE_MTLS"] = "always" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, - client_cert_source=None, - ) - - # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is - # "auto", and client_cert_source is provided. 
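# A sketch of what the modify_default_endpoint patching above enables: with a
# non-localhost DEFAULT_ENDPOINT, the mTLS variant the client derives is
# distinct, so the endpoint autoswitch becomes observable in tests. The
# endpoint name here is illustrative.
from google.cloud.container_v1.services.cluster_manager import ClusterManagerClient

mtls = ClusterManagerClient._get_default_mtls_endpoint("foo.googleapis.com")
assert mtls == "foo.mtls.googleapis.com"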
- os.environ["GOOGLE_API_USE_MTLS"] = "auto" - options = client_options.ClientOptions( - client_cert_source=client_cert_source_callback - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, - client_cert_source=client_cert_source_callback, - ) - - # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is - # "auto", and default_client_cert_source is provided. - os.environ["GOOGLE_API_USE_MTLS"] = "auto" - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=True, - ): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, + host=client.DEFAULT_ENDPOINT, scopes=None, - api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) - # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is - # "auto", but client_cert_source and default_client_cert_source are None. - os.environ["GOOGLE_API_USE_MTLS"] = "auto" - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=False, - ): + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. - os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported" - with pytest.raises(MutualTLSChannelError): - client = client_class() + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + ClusterManagerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerClient), +) +@mock.patch.object( + ClusterManagerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_cluster_manager_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - del os.environ["GOOGLE_API_USE_MTLS"] + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
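# A sketch of the behavior the parametrized test above pins down: with
# GOOGLE_API_USE_CLIENT_CERTIFICATE="true" and a cert callback supplied via
# client options, the client is expected to build gRPC SSL channel
# credentials from the callback's PEM bytes and switch to
# DEFAULT_MTLS_ENDPOINT; with "false" the callback is ignored. The callback
# below returns placeholder bytes, as the tests do.
from google.api_core import client_options

def cert_source():
    return b"cert bytes", b"key bytes"  # placeholder PEM pair

options = client_options.ClientOptions(client_cert_source=cert_source)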
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) @pytest.mark.parametrize( @@ -257,8 +383,9 @@ def test_cluster_manager_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -286,8 +413,9 @@ def test_cluster_manager_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -304,19 +432,22 @@ def test_cluster_manager_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - api_mtls_endpoint="squid.clam.whelk", - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) -def test_list_clusters(transport: str = "grpc"): +def test_list_clusters( + transport: str = "grpc", request_type=cluster_service.ListClustersRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
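# A sketch of the ADC probe mocked in the test above: SslCredentials.is_mtls
# reports whether a default client certificate is configured, and that flag
# drives the endpoint autoswitch when no explicit client_cert_source is set.
from unittest import mock
from google.auth.transport.grpc import SslCredentials

with mock.patch(
    "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
), mock.patch(
    "google.auth.transport.grpc.SslCredentials.is_mtls",
    new_callable=mock.PropertyMock,
) as is_mtls_mock:
    is_mtls_mock.return_value = True
    assert SslCredentials().is_mtls  # the client would pick the mTLS endpoint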
- request = cluster_service.ListClustersRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: @@ -331,7 +462,7 @@ def test_list_clusters(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListClustersRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ListClustersResponse) @@ -339,6 +470,10 @@ def test_list_clusters(transport: str = "grpc"): assert response.missing_zones == ["missing_zones_value"] +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + @pytest.mark.asyncio async def test_list_clusters_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -443,8 +578,11 @@ def test_list_clusters_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].parent == "parent_value" @@ -486,8 +624,11 @@ async def test_list_clusters_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].parent == "parent_value" @@ -506,14 +647,16 @@ async def test_list_clusters_flattened_error_async(): ) -def test_get_cluster(transport: str = "grpc"): +def test_get_cluster( + transport: str = "grpc", request_type=cluster_service.GetClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: @@ -555,7 +698,7 @@ def test_get_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetClusterRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Cluster) @@ -617,6 +760,10 @@ def test_get_cluster(transport: str = "grpc"): assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value" +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_get_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -807,9 +954,13 @@ def test_get_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -855,9 +1006,13 @@ async def test_get_cluster_flattened_async(): # request object values. 
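# A sketch of the request_type refactor applied to each RPC test from here
# on: parametrizing on the request class lets a *_from_dict variant rerun the
# same body with a plain dict, and assertions compare against a freshly built
# message because the client coerces the dict before calling the stub.
from google.cloud.container_v1.types import cluster_service

def check_request(request_type=cluster_service.ListClustersRequest):
    request = request_type()  # {} when request_type is dict
    coerced = cluster_service.ListClustersRequest(request)
    assert coerced == cluster_service.ListClustersRequest()

check_request()                   # message form
check_request(request_type=dict)  # dict form, as in *_from_dict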
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -877,14 +1032,16 @@ async def test_get_cluster_flattened_error_async(): ) -def test_create_cluster(transport: str = "grpc"): +def test_create_cluster( + transport: str = "grpc", request_type=cluster_service.CreateClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CreateClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: @@ -909,7 +1066,7 @@ def test_create_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CreateClusterRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -937,6 +1094,10 @@ def test_create_cluster(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_create_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1076,9 +1237,13 @@ def test_create_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") + assert args[0].parent == "parent_value" @@ -1124,9 +1289,13 @@ async def test_create_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") + assert args[0].parent == "parent_value" @@ -1146,14 +1315,16 @@ async def test_create_cluster_flattened_error_async(): ) -def test_update_cluster(transport: str = "grpc"): +def test_update_cluster( + transport: str = "grpc", request_type=cluster_service.UpdateClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.UpdateClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: @@ -1178,7 +1349,7 @@ def test_update_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.UpdateClusterRequest() # Establish that the response is the type that we expect. 
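# A sketch of the flattened-call assertion style used throughout: rather than
# comparing one composite request object, each flattened argument is checked
# field-by-field on the request captured by the mocked stub. The values are
# the placeholder strings the tests use.
from unittest import mock
from google.cloud.container_v1.types import cluster_service

stub = mock.Mock()
stub(cluster_service.GetClusterRequest(project_id="project_id_value", zone="zone_value"))
_, args, _ = stub.mock_calls[0]
assert args[0].project_id == "project_id_value"
assert args[0].zone == "zone_value"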
assert isinstance(response, cluster_service.Operation) @@ -1206,6 +1377,10 @@ def test_update_cluster(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_update_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1348,12 +1523,17 @@ def test_update_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( desired_node_version="desired_node_version_value" ) + assert args[0].name == "name_value" @@ -1405,12 +1585,17 @@ async def test_update_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( desired_node_version="desired_node_version_value" ) + assert args[0].name == "name_value" @@ -1433,14 +1618,16 @@ async def test_update_cluster_flattened_error_async(): ) -def test_update_node_pool(transport: str = "grpc"): +def test_update_node_pool( + transport: str = "grpc", request_type=cluster_service.UpdateNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.UpdateNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1467,7 +1654,7 @@ def test_update_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.UpdateNodePoolRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -1495,6 +1682,10 @@ def test_update_node_pool(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_update_node_pool_from_dict(): + test_update_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_update_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1615,14 +1806,16 @@ async def test_update_node_pool_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_node_pool_autoscaling(transport: str = "grpc"): +def test_set_node_pool_autoscaling( + transport: str = "grpc", request_type=cluster_service.SetNodePoolAutoscalingRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNodePoolAutoscalingRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -1649,7 +1842,7 @@ def test_set_node_pool_autoscaling(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -1677,6 +1870,10 @@ def test_set_node_pool_autoscaling(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_node_pool_autoscaling_from_dict(): + test_set_node_pool_autoscaling(request_type=dict) + + @pytest.mark.asyncio async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1797,14 +1994,16 @@ async def test_set_node_pool_autoscaling_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_logging_service(transport: str = "grpc"): +def test_set_logging_service( + transport: str = "grpc", request_type=cluster_service.SetLoggingServiceRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLoggingServiceRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1831,7 +2030,7 @@ def test_set_logging_service(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLoggingServiceRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -1859,6 +2058,10 @@ def test_set_logging_service(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_logging_service_from_dict(): + test_set_logging_service(request_type=dict) + + @pytest.mark.asyncio async def test_set_logging_service_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2003,10 +2206,15 @@ def test_set_logging_service_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" + assert args[0].name == "name_value" @@ -2054,10 +2262,15 @@ async def test_set_logging_service_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" + assert args[0].name == "name_value" @@ -2078,14 +2291,16 @@ async def test_set_logging_service_flattened_error_async(): ) -def test_set_monitoring_service(transport: str = "grpc"): +def test_set_monitoring_service( + transport: str = "grpc", request_type=cluster_service.SetMonitoringServiceRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
- request = cluster_service.SetMonitoringServiceRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2112,7 +2327,7 @@ def test_set_monitoring_service(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetMonitoringServiceRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2140,6 +2355,10 @@ def test_set_monitoring_service(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_monitoring_service_from_dict(): + test_set_monitoring_service(request_type=dict) + + @pytest.mark.asyncio async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2284,10 +2503,15 @@ def test_set_monitoring_service_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" + assert args[0].name == "name_value" @@ -2335,10 +2559,15 @@ async def test_set_monitoring_service_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" + assert args[0].name == "name_value" @@ -2359,14 +2588,16 @@ async def test_set_monitoring_service_flattened_error_async(): ) -def test_set_addons_config(transport: str = "grpc"): +def test_set_addons_config( + transport: str = "grpc", request_type=cluster_service.SetAddonsConfigRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetAddonsConfigRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2393,7 +2624,7 @@ def test_set_addons_config(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetAddonsConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2421,6 +2652,10 @@ def test_set_addons_config(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_addons_config_from_dict(): + test_set_addons_config(request_type=dict) + + @pytest.mark.asyncio async def test_set_addons_config_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2567,12 +2802,17 @@ def test_set_addons_config_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) ) + assert args[0].name == "name_value" @@ -2624,12 +2864,17 @@ async def test_set_addons_config_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) ) + assert args[0].name == "name_value" @@ -2652,14 +2897,16 @@ async def test_set_addons_config_flattened_error_async(): ) -def test_set_locations(transport: str = "grpc"): +def test_set_locations( + transport: str = "grpc", request_type=cluster_service.SetLocationsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLocationsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_locations), "__call__") as call: @@ -2684,7 +2931,7 @@ def test_set_locations(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLocationsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2712,6 +2959,10 @@ def test_set_locations(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_locations_from_dict(): + test_set_locations(request_type=dict) + + @pytest.mark.asyncio async def test_set_locations_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2852,10 +3103,15 @@ def test_set_locations_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] + assert args[0].name == "name_value" @@ -2903,10 +3159,15 @@ async def test_set_locations_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] + assert args[0].name == "name_value" @@ -2927,14 +3188,16 @@ async def test_set_locations_flattened_error_async(): ) -def test_update_master(transport: str = "grpc"): +def test_update_master( + transport: str = "grpc", request_type=cluster_service.UpdateMasterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
- request = cluster_service.UpdateMasterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.update_master), "__call__") as call: @@ -2959,7 +3222,7 @@ def test_update_master(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.UpdateMasterRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2987,6 +3250,10 @@ def test_update_master(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_update_master_from_dict(): + test_update_master(request_type=dict) + + @pytest.mark.asyncio async def test_update_master_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3127,10 +3394,15 @@ def test_update_master_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" + assert args[0].name == "name_value" @@ -3178,10 +3450,15 @@ async def test_update_master_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" + assert args[0].name == "name_value" @@ -3202,14 +3479,16 @@ async def test_update_master_flattened_error_async(): ) -def test_set_master_auth(transport: str = "grpc"): +def test_set_master_auth( + transport: str = "grpc", request_type=cluster_service.SetMasterAuthRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetMasterAuthRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call: @@ -3234,7 +3513,7 @@ def test_set_master_auth(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetMasterAuthRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -3262,6 +3541,10 @@ def test_set_master_auth(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_master_auth_from_dict(): + test_set_master_auth(request_type=dict) + + @pytest.mark.asyncio async def test_set_master_auth_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3380,14 +3663,16 @@ async def test_set_master_auth_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_delete_cluster(transport: str = "grpc"): +def test_delete_cluster( + transport: str = "grpc", request_type=cluster_service.DeleteClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.DeleteClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: @@ -3412,7 +3697,7 @@ def test_delete_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.DeleteClusterRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -3440,6 +3725,10 @@ def test_delete_cluster(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_delete_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3579,9 +3868,13 @@ def test_delete_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -3627,9 +3920,13 @@ async def test_delete_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -3649,14 +3946,16 @@ async def test_delete_cluster_flattened_error_async(): ) -def test_list_operations(transport: str = "grpc"): +def test_list_operations( + transport: str = "grpc", request_type=cluster_service.ListOperationsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListOperationsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_operations), "__call__") as call: @@ -3671,7 +3970,7 @@ def test_list_operations(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListOperationsRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.ListOperationsResponse) @@ -3679,6 +3978,10 @@ def test_list_operations(transport: str = "grpc"): assert response.missing_zones == ["missing_zones_value"] +def test_list_operations_from_dict(): + test_list_operations(request_type=dict) + + @pytest.mark.asyncio async def test_list_operations_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3785,7 +4088,9 @@ def test_list_operations_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -3826,7 +4131,9 @@ async def test_list_operations_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -3844,14 +4151,16 @@ async def test_list_operations_flattened_error_async(): ) -def test_get_operation(transport: str = "grpc"): +def test_get_operation( + transport: str = "grpc", request_type=cluster_service.GetOperationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetOperationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.get_operation), "__call__") as call: @@ -3876,7 +4185,7 @@ def test_get_operation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetOperationRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -3904,6 +4213,10 @@ def test_get_operation(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_get_operation_from_dict(): + test_get_operation(request_type=dict) + + @pytest.mark.asyncio async def test_get_operation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4042,8 +4355,11 @@ def test_get_operation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" @@ -4087,8 +4403,11 @@ async def test_get_operation_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" @@ -4107,14 +4426,16 @@ async def test_get_operation_flattened_error_async(): ) -def test_cancel_operation(transport: str = "grpc"): +def test_cancel_operation( + transport: str = "grpc", request_type=cluster_service.CancelOperationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CancelOperationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -4129,12 +4450,16 @@ def test_cancel_operation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CancelOperationRequest() # Establish that the response is the type that we expect. assert response is None +def test_cancel_operation_from_dict(): + test_cancel_operation(request_type=dict) + + @pytest.mark.asyncio async def test_cancel_operation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4240,9 +4565,13 @@ def test_cancel_operation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + assert args[0].name == "name_value" @@ -4286,9 +4615,13 @@ async def test_cancel_operation_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" + assert args[0].name == "name_value" @@ -4308,14 +4641,16 @@ async def test_cancel_operation_flattened_error_async(): ) -def test_get_server_config(transport: str = "grpc"): +def test_get_server_config( + transport: str = "grpc", request_type=cluster_service.GetServerConfigRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetServerConfigRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4336,7 +4671,7 @@ def test_get_server_config(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetServerConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ServerConfig) @@ -4352,6 +4687,10 @@ def test_get_server_config(transport: str = "grpc"): assert response.valid_master_versions == ["valid_master_versions_value"] +def test_get_server_config_from_dict(): + test_get_server_config(request_type=dict) + + @pytest.mark.asyncio async def test_get_server_config_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4474,8 +4813,11 @@ def test_get_server_config_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].name == "name_value" @@ -4517,8 +4859,11 @@ async def test_get_server_config_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].name == "name_value" @@ -4537,14 +4882,16 @@ async def test_get_server_config_flattened_error_async(): ) -def test_list_node_pools(transport: str = "grpc"): +def test_list_node_pools( + transport: str = "grpc", request_type=cluster_service.ListNodePoolsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListNodePoolsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: @@ -4557,12 +4904,16 @@ def test_list_node_pools(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListNodePoolsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ListNodePoolsResponse) +def test_list_node_pools_from_dict(): + test_list_node_pools(request_type=dict) + + @pytest.mark.asyncio async def test_list_node_pools_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4668,9 +5019,13 @@ def test_list_node_pools_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].parent == "parent_value" @@ -4716,9 +5071,13 @@ async def test_list_node_pools_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].parent == "parent_value" @@ -4738,14 +5097,16 @@ async def test_list_node_pools_flattened_error_async(): ) -def test_get_node_pool(transport: str = "grpc"): +def test_get_node_pool( + transport: str = "grpc", request_type=cluster_service.GetNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: @@ -4767,7 +5128,7 @@ def test_get_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetNodePoolRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.NodePool) @@ -4789,6 +5150,10 @@ def test_get_node_pool(transport: str = "grpc"): assert response.pod_ipv4_cidr_size == 1856 +def test_get_node_pool_from_dict(): + test_get_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_get_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4920,10 +5285,15 @@ def test_get_node_pool_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" @@ -4971,10 +5341,15 @@ async def test_get_node_pool_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" @@ -4995,14 +5370,16 @@ async def test_get_node_pool_flattened_error_async(): ) -def test_create_node_pool(transport: str = "grpc"): +def test_create_node_pool( + transport: str = "grpc", request_type=cluster_service.CreateNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CreateNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5029,7 +5406,7 @@ def test_create_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CreateNodePoolRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -5057,6 +5434,10 @@ def test_create_node_pool(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_create_node_pool_from_dict(): + test_create_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_create_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5201,10 +5582,15 @@ def test_create_node_pool_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") + assert args[0].parent == "parent_value" @@ -5252,10 +5638,15 @@ async def test_create_node_pool_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") + assert args[0].parent == "parent_value" @@ -5276,14 +5667,16 @@ async def test_create_node_pool_flattened_error_async(): ) -def test_delete_node_pool(transport: str = "grpc"): +def test_delete_node_pool( + transport: str = "grpc", request_type=cluster_service.DeleteNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.DeleteNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5310,7 +5703,7 @@ def test_delete_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.DeleteNodePoolRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -5338,6 +5731,10 @@ def test_delete_node_pool(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_delete_node_pool_from_dict(): + test_delete_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_delete_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5482,10 +5879,15 @@ def test_delete_node_pool_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" @@ -5533,10 +5935,15 @@ async def test_delete_node_pool_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" @@ -5557,14 +5964,16 @@ async def test_delete_node_pool_flattened_error_async(): ) -def test_rollback_node_pool_upgrade(transport: str = "grpc"): +def test_rollback_node_pool_upgrade( + transport: str = "grpc", request_type=cluster_service.RollbackNodePoolUpgradeRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.RollbackNodePoolUpgradeRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5591,7 +6000,7 @@ def test_rollback_node_pool_upgrade(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() # Establish that the response is the type that we expect. 
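# Worth noting for the Operation assertions that follow: GKE's ClusterManager
# returns its own cluster_service.Operation message synchronously rather than
# a google.api_core long-running-operation future, so mutation tests assert
# plain response fields instead of resolving an LRO. The message shape, using
# the same placeholder values the tests do:
from google.cloud.container_v1.types import cluster_service

op = cluster_service.Operation(
    name="name_value",
    zone="zone_value",
    status=cluster_service.Operation.Status.PENDING,
    status_message="status_message_value",
)
assert op.status == cluster_service.Operation.Status.PENDING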
assert isinstance(response, cluster_service.Operation) @@ -5619,6 +6028,10 @@ def test_rollback_node_pool_upgrade(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_rollback_node_pool_upgrade_from_dict(): + test_rollback_node_pool_upgrade(request_type=dict) + + @pytest.mark.asyncio async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5763,10 +6176,15 @@ def test_rollback_node_pool_upgrade_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" @@ -5814,10 +6232,15 @@ async def test_rollback_node_pool_upgrade_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].name == "name_value" @@ -5838,14 +6261,16 @@ async def test_rollback_node_pool_upgrade_flattened_error_async(): ) -def test_set_node_pool_management(transport: str = "grpc"): +def test_set_node_pool_management( + transport: str = "grpc", request_type=cluster_service.SetNodePoolManagementRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNodePoolManagementRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5872,7 +6297,7 @@ def test_set_node_pool_management(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNodePoolManagementRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -5900,6 +6325,10 @@ def test_set_node_pool_management(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_node_pool_management_from_dict(): + test_set_node_pool_management(request_type=dict) + + @pytest.mark.asyncio async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6020,14 +6449,16 @@ async def test_set_node_pool_management_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_labels(transport: str = "grpc"): +def test_set_labels( + transport: str = "grpc", request_type=cluster_service.SetLabelsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLabelsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
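# The x-goog-request-params assertions above capture the routing-header
# contract: the client mirrors request fields (here `name`) into gRPC
# metadata so the backend can route the call without parsing the payload.
# A minimal sketch of the same check:
from unittest import mock
from google.auth import credentials
from google.cloud.container_v1.services.cluster_manager import ClusterManagerClient
from google.cloud.container_v1.types import cluster_service

def sketch_routing_headers():
    client = ClusterManagerClient(credentials=credentials.AnonymousCredentials())
    with mock.patch.object(type(client._transport.set_labels), "__call__") as call:
        call.return_value = cluster_service.Operation()
        client.set_labels(request=cluster_service.SetLabelsRequest(name="name/value"))
        _, _, kw = call.mock_calls[0]
        assert ("x-goog-request-params", "name=name/value") in kw["metadata"]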
with mock.patch.object(type(client._transport.set_labels), "__call__") as call: @@ -6052,7 +6483,7 @@ def test_set_labels(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLabelsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6080,6 +6511,10 @@ def test_set_labels(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_labels_from_dict(): + test_set_labels(request_type=dict) + + @pytest.mark.asyncio async def test_set_labels_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6198,14 +6633,16 @@ async def test_set_labels_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_legacy_abac(transport: str = "grpc"): +def test_set_legacy_abac( + transport: str = "grpc", request_type=cluster_service.SetLegacyAbacRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLegacyAbacRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: @@ -6230,7 +6667,7 @@ def test_set_legacy_abac(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLegacyAbacRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6258,6 +6695,10 @@ def test_set_legacy_abac(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_legacy_abac_from_dict(): + test_set_legacy_abac(request_type=dict) + + @pytest.mark.asyncio async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6398,10 +6839,15 @@ def test_set_legacy_abac_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True + assert args[0].name == "name_value" @@ -6449,10 +6895,15 @@ async def test_set_legacy_abac_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True + assert args[0].name == "name_value" @@ -6473,14 +6924,16 @@ async def test_set_legacy_abac_flattened_error_async(): ) -def test_start_ip_rotation(transport: str = "grpc"): +def test_start_ip_rotation( + transport: str = "grpc", request_type=cluster_service.StartIPRotationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.StartIPRotationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -6507,7 +6960,7 @@ def test_start_ip_rotation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.StartIPRotationRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6535,6 +6988,10 @@ def test_start_ip_rotation(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_start_ip_rotation_from_dict(): + test_start_ip_rotation(request_type=dict) + + @pytest.mark.asyncio async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6678,9 +7135,13 @@ def test_start_ip_rotation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -6726,9 +7187,13 @@ async def test_start_ip_rotation_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -6748,14 +7213,16 @@ async def test_start_ip_rotation_flattened_error_async(): ) -def test_complete_ip_rotation(transport: str = "grpc"): +def test_complete_ip_rotation( + transport: str = "grpc", request_type=cluster_service.CompleteIPRotationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CompleteIPRotationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -6782,7 +7249,7 @@ def test_complete_ip_rotation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CompleteIPRotationRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6810,6 +7277,10 @@ def test_complete_ip_rotation(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_complete_ip_rotation_from_dict(): + test_complete_ip_rotation(request_type=dict) + + @pytest.mark.asyncio async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6953,9 +7424,13 @@ def test_complete_ip_rotation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -7001,9 +7476,13 @@ async def test_complete_ip_rotation_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].name == "name_value" @@ -7023,14 +7502,16 @@ async def test_complete_ip_rotation_flattened_error_async(): ) -def test_set_node_pool_size(transport: str = "grpc"): +def test_set_node_pool_size( + transport: str = "grpc", request_type=cluster_service.SetNodePoolSizeRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNodePoolSizeRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7057,7 +7538,7 @@ def test_set_node_pool_size(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNodePoolSizeRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -7085,6 +7566,10 @@ def test_set_node_pool_size(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_node_pool_size_from_dict(): + test_set_node_pool_size(request_type=dict) + + @pytest.mark.asyncio async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7205,14 +7690,16 @@ async def test_set_node_pool_size_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_network_policy(transport: str = "grpc"): +def test_set_network_policy( + transport: str = "grpc", request_type=cluster_service.SetNetworkPolicyRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNetworkPolicyRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7239,7 +7726,7 @@ def test_set_network_policy(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNetworkPolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -7267,6 +7754,10 @@ def test_set_network_policy(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_network_policy_from_dict(): + test_set_network_policy(request_type=dict) + + @pytest.mark.asyncio async def test_set_network_policy_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7413,12 +7904,17 @@ def test_set_network_policy_flattened(): # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( provider=cluster_service.NetworkPolicy.Provider.CALICO ) + assert args[0].name == "name_value" @@ -7470,12 +7966,17 @@ async def test_set_network_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( provider=cluster_service.NetworkPolicy.Provider.CALICO ) + assert args[0].name == "name_value" @@ -7498,14 +7999,16 @@ async def test_set_network_policy_flattened_error_async(): ) -def test_set_maintenance_policy(transport: str = "grpc"): +def test_set_maintenance_policy( + transport: str = "grpc", request_type=cluster_service.SetMaintenancePolicyRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetMaintenancePolicyRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7532,7 +8035,7 @@ def test_set_maintenance_policy(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetMaintenancePolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -7560,6 +8063,10 @@ def test_set_maintenance_policy(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_maintenance_policy_from_dict(): + test_set_maintenance_policy(request_type=dict) + + @pytest.mark.asyncio async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7710,9 +8217,13 @@ def test_set_maintenance_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( window=cluster_service.MaintenanceWindow( daily_maintenance_window=cluster_service.DailyMaintenanceWindow( @@ -7720,6 +8231,7 @@ def test_set_maintenance_policy_flattened(): ) ) ) + assert args[0].name == "name_value" @@ -7779,9 +8291,13 @@ async def test_set_maintenance_policy_flattened_async(): # request object values. 
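# The equality checks above rely on proto-plus value semantics: messages
# compare field-by-field, so a freshly constructed policy can be compared to
# the one the client attached to the request. A self-contained demonstration:
from google.cloud.container_v1.types import cluster_service

policy = cluster_service.MaintenancePolicy(
    window=cluster_service.MaintenanceWindow(
        daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
            start_time="start_time_value"
        )
    )
)
# Two independently built messages with identical fields are equal, which is
# what makes the `args[0].maintenance_policy == ...` assertion meaningful.
assert policy == cluster_service.MaintenancePolicy(
    window=cluster_service.MaintenanceWindow(
        daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
            start_time="start_time_value"
        )
    )
)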
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( window=cluster_service.MaintenanceWindow( daily_maintenance_window=cluster_service.DailyMaintenanceWindow( @@ -7789,6 +8305,7 @@ async def test_set_maintenance_policy_flattened_async(): ) ) ) + assert args[0].name == "name_value" @@ -7815,14 +8332,16 @@ async def test_set_maintenance_policy_flattened_error_async(): ) -def test_list_usable_subnetworks(transport: str = "grpc"): +def test_list_usable_subnetworks( + transport: str = "grpc", request_type=cluster_service.ListUsableSubnetworksRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListUsableSubnetworksRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7839,7 +8358,7 @@ def test_list_usable_subnetworks(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListUsableSubnetworksRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListUsableSubnetworksPager) @@ -7847,6 +8366,10 @@ def test_list_usable_subnetworks(transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" +def test_list_usable_subnetworks_from_dict(): + test_list_usable_subnetworks(request_type=dict) + + @pytest.mark.asyncio async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -8016,8 +8539,8 @@ def test_list_usable_subnetworks_pages(): RuntimeError, ) pages = list(client.list_usable_subnetworks(request={}).pages) - for page, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page.raw_page.next_page_token == token + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token @pytest.mark.asyncio @@ -8101,10 +8624,10 @@ async def test_list_usable_subnetworks_async_pages(): RuntimeError, ) pages = [] - async for page in (await client.list_usable_subnetworks(request={})).pages: - pages.append(page) - for page, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page.raw_page.next_page_token == token + async for page_ in (await client.list_usable_subnetworks(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token def test_credentials_transport_error(): @@ -8161,6 +8684,21 @@ def test_transport_get_channel(): assert channel +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
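# The pager tests above walk .pages across faked responses until an empty
# next_page_token ends iteration; the rename of the loop variable to `page_`
# looks lint-driven (it avoids re-using the name) and changes no behavior.
# A condensed version of the sync paging contract:
from unittest import mock
from google.auth import credentials
from google.cloud.container_v1.services.cluster_manager import ClusterManagerClient
from google.cloud.container_v1.types import cluster_service

def sketch_pager():
    client = ClusterManagerClient(credentials=credentials.AnonymousCredentials())
    with mock.patch.object(
        type(client._transport.list_usable_subnetworks), "__call__"
    ) as call:
        call.side_effect = [
            cluster_service.ListUsableSubnetworksResponse(next_page_token="abc"),
            cluster_service.ListUsableSubnetworksResponse(next_page_token=""),
        ]
        pages = list(client.list_usable_subnetworks(request={}).pages)
        for page_, token in zip(pages, ["abc", ""]):
            assert page_.raw_page.next_page_token == token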
client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) @@ -8178,9 +8716,13 @@ def test_cluster_manager_base_transport_error(): def test_cluster_manager_base_transport(): # Instantiate the base transport. - transport = transports.ClusterManagerTransport( - credentials=credentials.AnonymousCredentials(), - ) + with mock.patch( + "google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ClusterManagerTransport( + credentials=credentials.AnonymousCredentials(), + ) # Every method on the transport should just blindly # raise NotImplementedError. @@ -8224,24 +8766,42 @@ def test_cluster_manager_base_transport(): def test_cluster_manager_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file - with mock.patch.object(auth, "load_credentials_from_file") as load_creds: + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None load_creds.return_value = (credentials.AnonymousCredentials(), None) transport = transports.ClusterManagerTransport( - credentials_file="credentials.json", + credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", ) +def test_cluster_manager_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport() + adc.assert_called_once() + + def test_cluster_manager_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(auth, "default") as adc: adc.return_value = (credentials.AnonymousCredentials(), None) ClusterManagerClient() adc.assert_called_once_with( - scopes=("https://www.googleapis.com/auth/cloud-platform",) + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, ) @@ -8250,9 +8810,12 @@ def test_cluster_manager_transport_auth_adc(): # ADC credentials. with mock.patch.object(auth, "default") as adc: adc.return_value = (credentials.AnonymousCredentials(), None) - transports.ClusterManagerGrpcTransport(host="squid.clam.whelk") + transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) adc.assert_called_once_with( - scopes=("https://www.googleapis.com/auth/cloud-platform",) + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", ) @@ -8279,172 +8842,128 @@ def test_cluster_manager_host_with_port(): def test_cluster_manager_grpc_transport_channel(): channel = grpc.insecure_channel("http://localhost/") - # Check that if channel is provided, mtls endpoint and client_cert_source - # won't be used. - callback = mock.MagicMock() + # Check that channel is used if provided. 
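# Two threads run through the transport tests above. First, the base
# transport's __init__/_prep_wrapped_messages are stubbed out so the abstract
# class can be poked without doing real credential work. Second,
# quota_project_id is new plumbing: it now rides along with every credential
# lookup so usage can be billed to an explicit project. A condensed sketch of
# that second contract (it mirrors, rather than replaces, the test above):
from unittest import mock
from google import auth
from google.auth import credentials
from google.cloud.container_v1.services.cluster_manager import transports

with mock.patch.object(auth, "default") as adc:
    adc.return_value = (credentials.AnonymousCredentials(), None)
    transports.ClusterManagerGrpcTransport(
        host="squid.clam.whelk", quota_project_id="octopus"
    )
    # The quota project is forwarded to the ADC lookup itself.
    adc.assert_called_once_with(
        scopes=("https://www.googleapis.com/auth/cloud-platform",),
        quota_project_id="octopus",
    )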
transport = transports.ClusterManagerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=callback, + host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" - assert not callback.called def test_cluster_manager_grpc_asyncio_transport_channel(): channel = aio.insecure_channel("http://localhost/") - # Check that if channel is provided, mtls endpoint and client_cert_source - # won't be used. - callback = mock.MagicMock() + # Check that channel is used if provided. transport = transports.ClusterManagerGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=callback, + host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" - assert not callback.called - - -@mock.patch("grpc.ssl_channel_credentials", autospec=True) -@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) -def test_cluster_manager_grpc_transport_channel_mtls_with_client_cert_source( - grpc_create_channel, grpc_ssl_channel_cred -): - # Check that if channel is None, but api_mtls_endpoint and client_cert_source - # are provided, then a mTLS channel will be created. - mock_cred = mock.Mock() - - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - transport = transports.ClusterManagerGrpcTransport( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), - ssl_credentials=mock_ssl_cred, - ) - assert transport.grpc_channel == mock_grpc_channel -@mock.patch("grpc.ssl_channel_credentials", autospec=True) -@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) -def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_client_cert_source( - grpc_create_channel, grpc_ssl_channel_cred +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +def test_cluster_manager_transport_channel_mtls_with_client_cert_source( + transport_class, ): - # Check that if channel is None, but api_mtls_endpoint and client_cert_source - # are provided, then a mTLS channel will be created. 
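# With the deprecated kwargs dropped from this path, the rule above is
# simple: a caller-supplied channel is used as-is and the mTLS machinery is
# bypassed entirely. Condensed:
import grpc
from google.cloud.container_v1.services.cluster_manager import transports

channel = grpc.insecure_channel("localhost:8080")
transport = transports.ClusterManagerGrpcTransport(
    host="squid.clam.whelk", channel=channel
)
assert transport.grpc_channel is channel  # no new channel was created
assert transport._host == "squid.clam.whelk:443"  # default port appended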
- mock_cred = mock.Mock() - - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel + with mock.patch( + "grpc.ssl_channel_credentials", autospec=True + ) as grpc_ssl_channel_cred: + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(auth, "default") as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() - transport = transports.ClusterManagerGrpcAsyncIOTransport( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), - ssl_credentials=mock_ssl_cred, - ) - assert transport.grpc_channel == mock_grpc_channel + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel @pytest.mark.parametrize( - "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], ) -@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) -def test_cluster_manager_grpc_transport_channel_mtls_with_adc( - grpc_create_channel, api_mtls_endpoint -): - # Check that if channel and client_cert_source are None, but api_mtls_endpoint - # is provided, then a mTLS channel will be created with SSL ADC. - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - # Mock google.auth.transport.grpc.SslCredentials class. 
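# The pytest.warns(DeprecationWarning) wrapper above is the behavioral
# headline: api_mtls_endpoint/client_cert_source still work on the transports
# but are deprecated in favor of handing over ready-made
# ssl_channel_credentials. A hedged sketch of the replacement path; the PEM
# file names are placeholders, not files this repo ships:
import grpc
from google.auth import credentials
from google.cloud.container_v1.services.cluster_manager import transports

with open("client_cert.pem", "rb") as f:
    cert = f.read()
with open("client_key.pem", "rb") as f:
    key = f.read()

transport = transports.ClusterManagerGrpcTransport(
    host="mtls.squid.clam.whelk",
    credentials=credentials.AnonymousCredentials(),
    ssl_channel_credentials=grpc.ssl_channel_credentials(
        certificate_chain=cert, private_key=key
    ),
)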
+def test_cluster_manager_transport_channel_mtls_with_adc(transport_class): mock_ssl_cred = mock.Mock() with mock.patch.multiple( "google.auth.transport.grpc.SslCredentials", __init__=mock.Mock(return_value=None), ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), ): - mock_cred = mock.Mock() - transport = transports.ClusterManagerGrpcTransport( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint=api_mtls_endpoint, - client_cert_source=None, - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), - ssl_credentials=mock_ssl_cred, - ) - assert transport.grpc_channel == mock_grpc_channel + with mock.patch.object( + transport_class, "create_channel", autospec=True + ) as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=("https://www.googleapis.com/auth/cloud-platform",), + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + ) + assert transport.grpc_channel == mock_grpc_channel -@pytest.mark.parametrize( - "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"] -) -@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True) -def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_adc( - grpc_create_channel, api_mtls_endpoint -): - # Check that if channel and client_cert_source are None, but api_mtls_endpoint - # is provided, then a mTLS channel will be created with SSL ADC. - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - # Mock google.auth.transport.grpc.SslCredentials class. 
- mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - mock_cred = mock.Mock() - transport = transports.ClusterManagerGrpcAsyncIOTransport( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint=api_mtls_endpoint, - client_cert_source=None, +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.ClusterManagerTransport, "_prep_wrapped_messages" + ) as prep: + client = ClusterManagerClient( + credentials=credentials.AnonymousCredentials(), client_info=client_info, ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=("https://www.googleapis.com/auth/cloud-platform",), - ssl_credentials=mock_ssl_cred, + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.ClusterManagerTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = ClusterManagerClient.get_transport_class() + transport = transport_class( + credentials=credentials.AnonymousCredentials(), client_info=client_info, ) - assert transport.grpc_channel == mock_grpc_channel + prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/container_v1beta1/__init__.py b/tests/unit/gapic/container_v1beta1/__init__.py index e69de29b..8b137891 100644 --- a/tests/unit/gapic/container_v1beta1/__init__.py +++ b/tests/unit/gapic/container_v1beta1/__init__.py @@ -0,0 +1 @@ + diff --git a/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/tests/unit/gapic/container_v1beta1/test_cluster_manager.py index 2c253f0f..941e6d01 100644 --- a/tests/unit/gapic/container_v1beta1/test_cluster_manager.py +++ b/tests/unit/gapic/container_v1beta1/test_cluster_manager.py @@ -22,6 +22,7 @@ from grpc.experimental import aio import math import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule from google import auth from google.api_core import client_options @@ -46,6 +47,17 @@ def client_cert_source_callback(): return b"cert bytes", b"key bytes" +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -112,6 +124,16 @@ def test_cluster_manager_client_get_transport_class(): ), ], ) +@mock.patch.object( + ClusterManagerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerClient), +) +@mock.patch.object( + ClusterManagerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerAsyncClient), +) def test_cluster_manager_client_client_options( client_class, transport_class, transport_name ): @@ -136,103 +158,207 @@ def test_cluster_manager_client_client_options( credentials_file=None, host="squid.clam.whelk", scopes=None, - api_mtls_endpoint="squid.clam.whelk", - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is # "never". - os.environ["GOOGLE_API_USE_MTLS"] = "never" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is - # "always". - os.environ["GOOGLE_API_USE_MTLS"] = "always" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, - client_cert_source=None, - ) - - # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is - # "auto", and client_cert_source is provided. - os.environ["GOOGLE_API_USE_MTLS"] = "auto" - options = client_options.ClientOptions( - client_cert_source=client_cert_source_callback - ) - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, - client_cert_source=client_cert_source_callback, - ) - - # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is - # "auto", and default_client_cert_source is provided. 
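# modify_default_endpoint above exists because the mTLS endpoint is derived
# textually from the default endpoint (".mtls." is spliced in); if a test
# environment pointed the default at localhost, the two would be identical,
# so the DEFAULT_ENDPOINT patches swap in foo.googleapis.com first. The
# derivation itself, as test__get_default_mtls_endpoint exercises:
from google.cloud.container_v1beta1.services.cluster_manager import (
    ClusterManagerClient,
)

assert (
    ClusterManagerClient._get_default_mtls_endpoint("example.googleapis.com")
    == "example.mtls.googleapis.com"
)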
- os.environ["GOOGLE_API_USE_MTLS"] = "auto" - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=True, - ): + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, + host=client.DEFAULT_ENDPOINT, scopes=None, - api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) - # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is - # "auto", but client_cert_source and default_client_cert_source are None. - os.environ["GOOGLE_API_USE_MTLS"] = "auto" - with mock.patch.object(transport_class, "__init__") as patched: - with mock.patch( - "google.auth.transport.mtls.has_default_client_cert_source", - return_value=False, - ): + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: patched.return_value = None client = client_class() patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client.DEFAULT_MTLS_ENDPOINT, scopes=None, - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. - os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported" - with pytest.raises(MutualTLSChannelError): - client = client_class() + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class() - del os.environ["GOOGLE_API_USE_MTLS"] + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"), + ( + ClusterManagerAsyncClient, + transports.ClusterManagerGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + ClusterManagerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerClient), +) +@mock.patch.object( + ClusterManagerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(ClusterManagerAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_cluster_manager_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + ssl_channel_creds = mock.Mock() + with mock.patch( + "grpc.ssl_channel_credentials", return_value=ssl_channel_creds + ): + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_ssl_channel_creds = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_ssl_channel_creds = ssl_channel_creds + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.ssl_credentials", + new_callable=mock.PropertyMock, + ) as ssl_credentials_mock: + if use_client_cert_env == "false": + is_mtls_mock.return_value = False + ssl_credentials_mock.return_value = None + expected_host = client.DEFAULT_ENDPOINT + expected_ssl_channel_creds = None + else: + is_mtls_mock.return_value = True + ssl_credentials_mock.return_value = mock.Mock() + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_ssl_channel_creds = ( + ssl_credentials_mock.return_value + ) + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + ssl_channel_credentials=expected_ssl_channel_creds, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.grpc.SslCredentials.__init__", return_value=None + ): + with mock.patch( + "google.auth.transport.grpc.SslCredentials.is_mtls", + new_callable=mock.PropertyMock, + ) as is_mtls_mock: + is_mtls_mock.return_value = False + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + ) @pytest.mark.parametrize( @@ -259,8 +385,9 @@ def test_cluster_manager_client_client_options_scopes( credentials_file=None, host=client.DEFAULT_ENDPOINT, scopes=["1", "2"], - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -288,8 +415,9 @@ def test_cluster_manager_client_client_options_credentials_file( credentials_file="credentials.json", host=client.DEFAULT_ENDPOINT, scopes=None, - api_mtls_endpoint=client.DEFAULT_ENDPOINT, - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) @@ -306,19 +434,22 @@ def test_cluster_manager_client_client_options_from_dict(): credentials_file=None, host="squid.clam.whelk", scopes=None, - api_mtls_endpoint="squid.clam.whelk", - client_cert_source=None, + ssl_channel_credentials=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, ) -def test_list_clusters(transport: str = "grpc"): +def test_list_clusters( + transport: str = "grpc", request_type=cluster_service.ListClustersRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
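# Net effect of test_cluster_manager_client_mtls_env_auto above: endpoint and
# client-certificate decisions are now driven by two environment variables,
# GOOGLE_API_USE_MTLS_ENDPOINT (never/auto/always; bad values raise
# MutualTLSChannelError) and GOOGLE_API_USE_CLIENT_CERTIFICATE (true/false;
# bad values raise ValueError), and every case is scoped with mock.patch.dict
# so the old manual `del os.environ[...]` cleanup is gone. One cell of the
# matrix, condensed:
import os
from unittest import mock
from google.cloud.container_v1beta1.services.cluster_manager import (
    ClusterManagerClient,
    transports,
)

with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
    with mock.patch.object(
        transports.ClusterManagerGrpcTransport, "__init__", return_value=None
    ) as patched:
        client = ClusterManagerClient()
        # "always" forces the mTLS endpoint even without a client cert.
        assert (
            patched.call_args[1]["host"]
            == ClusterManagerClient.DEFAULT_MTLS_ENDPOINT
        )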
- request = cluster_service.ListClustersRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_clusters), "__call__") as call: @@ -333,7 +464,7 @@ def test_list_clusters(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListClustersRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ListClustersResponse) @@ -341,6 +472,10 @@ def test_list_clusters(transport: str = "grpc"): assert response.missing_zones == ["missing_zones_value"] +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + @pytest.mark.asyncio async def test_list_clusters_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -445,7 +580,9 @@ def test_list_clusters_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -486,7 +623,9 @@ async def test_list_clusters_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -504,14 +643,16 @@ async def test_list_clusters_flattened_error_async(): ) -def test_get_cluster(transport: str = "grpc"): +def test_get_cluster( + transport: str = "grpc", request_type=cluster_service.GetClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.get_cluster), "__call__") as call: @@ -555,7 +696,7 @@ def test_get_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetClusterRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Cluster) @@ -621,6 +762,10 @@ def test_get_cluster(transport: str = "grpc"): assert response.tpu_ipv4_cidr_block == "tpu_ipv4_cidr_block_value" +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_get_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -816,8 +961,11 @@ def test_get_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -861,8 +1009,11 @@ async def test_get_cluster_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -881,14 +1032,16 @@ async def test_get_cluster_flattened_error_async(): ) -def test_create_cluster(transport: str = "grpc"): +def test_create_cluster( + transport: str = "grpc", request_type=cluster_service.CreateClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CreateClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.create_cluster), "__call__") as call: @@ -913,7 +1066,7 @@ def test_create_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CreateClusterRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -941,6 +1094,10 @@ def test_create_cluster(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_create_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1079,8 +1236,11 @@ def test_create_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") @@ -1124,8 +1284,11 @@ async def test_create_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster == cluster_service.Cluster(name="name_value") @@ -1144,14 +1307,16 @@ async def test_create_cluster_flattened_error_async(): ) -def test_update_cluster(transport: str = "grpc"): +def test_update_cluster( + transport: str = "grpc", request_type=cluster_service.UpdateClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.UpdateClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.update_cluster), "__call__") as call: @@ -1176,7 +1341,7 @@ def test_update_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.UpdateClusterRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -1204,6 +1369,10 @@ def test_update_cluster(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_update_cluster_from_dict(): + test_update_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_update_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1345,9 +1514,13 @@ def test_update_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( desired_node_version="desired_node_version_value" ) @@ -1399,9 +1572,13 @@ async def test_update_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].update == cluster_service.ClusterUpdate( desired_node_version="desired_node_version_value" ) @@ -1425,14 +1602,16 @@ async def test_update_cluster_flattened_error_async(): ) -def test_update_node_pool(transport: str = "grpc"): +def test_update_node_pool( + transport: str = "grpc", request_type=cluster_service.UpdateNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.UpdateNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1459,7 +1638,7 @@ def test_update_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.UpdateNodePoolRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -1487,6 +1666,10 @@ def test_update_node_pool(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_update_node_pool_from_dict(): + test_update_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_update_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1607,14 +1790,16 @@ async def test_update_node_pool_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_node_pool_autoscaling(transport: str = "grpc"): +def test_set_node_pool_autoscaling( + transport: str = "grpc", request_type=cluster_service.SetNodePoolAutoscalingRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNodePoolAutoscalingRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -1641,7 +1826,7 @@ def test_set_node_pool_autoscaling(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -1669,6 +1854,10 @@ def test_set_node_pool_autoscaling(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_node_pool_autoscaling_from_dict(): + test_set_node_pool_autoscaling(request_type=dict) + + @pytest.mark.asyncio async def test_set_node_pool_autoscaling_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1789,14 +1978,16 @@ async def test_set_node_pool_autoscaling_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_logging_service(transport: str = "grpc"): +def test_set_logging_service( + transport: str = "grpc", request_type=cluster_service.SetLoggingServiceRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLoggingServiceRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -1823,7 +2014,7 @@ def test_set_logging_service(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLoggingServiceRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -1851,6 +2042,10 @@ def test_set_logging_service(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_logging_service_from_dict(): + test_set_logging_service(request_type=dict) + + @pytest.mark.asyncio async def test_set_logging_service_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -1994,9 +2189,13 @@ def test_set_logging_service_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" @@ -2042,9 +2241,13 @@ async def test_set_logging_service_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].logging_service == "logging_service_value" @@ -2064,14 +2267,16 @@ async def test_set_logging_service_flattened_error_async(): ) -def test_set_monitoring_service(transport: str = "grpc"): +def test_set_monitoring_service( + transport: str = "grpc", request_type=cluster_service.SetMonitoringServiceRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
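# The `*_flattened` hunks replace one combined request comparison with an
# assert per flattened field, so a regression names the exact offending
# field. A minimal sketch with a hypothetical request object:

from unittest import mock


class FlattenedRequest:
    def __init__(self, project_id="", zone="", cluster_id=""):
        self.project_id = project_id
        self.zone = zone
        self.cluster_id = cluster_id


def test_flattened_fields_individually():
    call = mock.Mock()
    call(FlattenedRequest("project_id_value", "zone_value", "cluster_id_value"))
    assert len(call.mock_calls) == 1
    _, args, _ = call.mock_calls[0]
    assert args[0].project_id == "project_id_value"
    assert args[0].zone == "zone_value"
    assert args[0].cluster_id == "cluster_id_value"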
- request = cluster_service.SetMonitoringServiceRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2098,7 +2303,7 @@ def test_set_monitoring_service(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetMonitoringServiceRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2126,6 +2331,10 @@ def test_set_monitoring_service(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_monitoring_service_from_dict(): + test_set_monitoring_service(request_type=dict) + + @pytest.mark.asyncio async def test_set_monitoring_service_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2269,9 +2478,13 @@ def test_set_monitoring_service_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" @@ -2317,9 +2530,13 @@ async def test_set_monitoring_service_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].monitoring_service == "monitoring_service_value" @@ -2339,14 +2556,16 @@ async def test_set_monitoring_service_flattened_error_async(): ) -def test_set_addons_config(transport: str = "grpc"): +def test_set_addons_config( + transport: str = "grpc", request_type=cluster_service.SetAddonsConfigRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetAddonsConfigRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -2373,7 +2592,7 @@ def test_set_addons_config(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetAddonsConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2401,6 +2620,10 @@ def test_set_addons_config(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_addons_config_from_dict(): + test_set_addons_config(request_type=dict) + + @pytest.mark.asyncio async def test_set_addons_config_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2546,9 +2769,13 @@ def test_set_addons_config_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) ) @@ -2600,9 +2827,13 @@ async def test_set_addons_config_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].addons_config == cluster_service.AddonsConfig( http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True) ) @@ -2626,14 +2857,16 @@ async def test_set_addons_config_flattened_error_async(): ) -def test_set_locations(transport: str = "grpc"): +def test_set_locations( + transport: str = "grpc", request_type=cluster_service.SetLocationsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLocationsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_locations), "__call__") as call: @@ -2658,7 +2891,7 @@ def test_set_locations(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLocationsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -2686,6 +2919,10 @@ def test_set_locations(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_locations_from_dict(): + test_set_locations(request_type=dict) + + @pytest.mark.asyncio async def test_set_locations_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -2825,9 +3062,13 @@ def test_set_locations_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] @@ -2873,9 +3114,13 @@ async def test_set_locations_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].locations == ["locations_value"] @@ -2895,14 +3140,16 @@ async def test_set_locations_flattened_error_async(): ) -def test_update_master(transport: str = "grpc"): +def test_update_master( + transport: str = "grpc", request_type=cluster_service.UpdateMasterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.UpdateMasterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.update_master), "__call__") as call: @@ -2927,7 +3174,7 @@ def test_update_master(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.UpdateMasterRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -2955,6 +3202,10 @@ def test_update_master(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_update_master_from_dict(): + test_update_master(request_type=dict) + + @pytest.mark.asyncio async def test_update_master_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3094,9 +3345,13 @@ def test_update_master_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" @@ -3142,9 +3397,13 @@ async def test_update_master_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].master_version == "master_version_value" @@ -3164,14 +3423,16 @@ async def test_update_master_flattened_error_async(): ) -def test_set_master_auth(transport: str = "grpc"): +def test_set_master_auth( + transport: str = "grpc", request_type=cluster_service.SetMasterAuthRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetMasterAuthRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_master_auth), "__call__") as call: @@ -3196,7 +3457,7 @@ def test_set_master_auth(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetMasterAuthRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -3224,6 +3485,10 @@ def test_set_master_auth(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_master_auth_from_dict(): + test_set_master_auth(request_type=dict) + + @pytest.mark.asyncio async def test_set_master_auth_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3342,14 +3607,16 @@ async def test_set_master_auth_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_delete_cluster(transport: str = "grpc"): +def test_delete_cluster( + transport: str = "grpc", request_type=cluster_service.DeleteClusterRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.DeleteClusterRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.delete_cluster), "__call__") as call: @@ -3374,7 +3641,7 @@ def test_delete_cluster(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.DeleteClusterRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -3402,6 +3669,10 @@ def test_delete_cluster(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + @pytest.mark.asyncio async def test_delete_cluster_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3540,8 +3811,11 @@ def test_delete_cluster_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -3585,8 +3859,11 @@ async def test_delete_cluster_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -3605,14 +3882,16 @@ async def test_delete_cluster_flattened_error_async(): ) -def test_list_operations(transport: str = "grpc"): +def test_list_operations( + transport: str = "grpc", request_type=cluster_service.ListOperationsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListOperationsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_operations), "__call__") as call: @@ -3627,7 +3906,7 @@ def test_list_operations(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListOperationsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ListOperationsResponse) @@ -3635,6 +3914,10 @@ def test_list_operations(transport: str = "grpc"): assert response.missing_zones == ["missing_zones_value"] +def test_list_operations_from_dict(): + test_list_operations(request_type=dict) + + @pytest.mark.asyncio async def test_list_operations_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3741,7 +4024,9 @@ def test_list_operations_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -3782,7 +4067,9 @@ async def test_list_operations_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -3800,14 +4087,16 @@ async def test_list_operations_flattened_error_async(): ) -def test_get_operation(transport: str = "grpc"): +def test_get_operation( + transport: str = "grpc", request_type=cluster_service.GetOperationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetOperationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
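# The `*_field_headers` tests pin the `x-goog-request-params` metadata entry
# that gapic clients derive from request fields for server-side routing. A
# sketch of building and checking that header; real gapic code also
# URL-quotes the values, which this toy helper skips:

def routing_metadata(**params):
    value = "&".join("%s=%s" % item for item in sorted(params.items()))
    return [("x-goog-request-params", value)]


def test_routing_params_header():
    kw = {"metadata": routing_metadata(name="name/value")}
    assert ("x-goog-request-params", "name=name/value") in kw["metadata"]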
with mock.patch.object(type(client._transport.get_operation), "__call__") as call: @@ -3832,7 +4121,7 @@ def test_get_operation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetOperationRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -3860,6 +4149,10 @@ def test_get_operation(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_get_operation_from_dict(): + test_get_operation(request_type=dict) + + @pytest.mark.asyncio async def test_get_operation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -3998,8 +4291,11 @@ def test_get_operation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" @@ -4043,8 +4339,11 @@ async def test_get_operation_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" @@ -4063,14 +4362,16 @@ async def test_get_operation_flattened_error_async(): ) -def test_cancel_operation(transport: str = "grpc"): +def test_cancel_operation( + transport: str = "grpc", request_type=cluster_service.CancelOperationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CancelOperationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4085,12 +4386,16 @@ def test_cancel_operation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CancelOperationRequest() # Establish that the response is the type that we expect. assert response is None +def test_cancel_operation_from_dict(): + test_cancel_operation(request_type=dict) + + @pytest.mark.asyncio async def test_cancel_operation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4195,8 +4500,11 @@ def test_cancel_operation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" @@ -4238,8 +4546,11 @@ async def test_cancel_operation_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].operation_id == "operation_id_value" @@ -4258,14 +4569,16 @@ async def test_cancel_operation_flattened_error_async(): ) -def test_get_server_config(transport: str = "grpc"): +def test_get_server_config( + transport: str = "grpc", request_type=cluster_service.GetServerConfigRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetServerConfigRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4286,7 +4599,7 @@ def test_get_server_config(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetServerConfigRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ServerConfig) @@ -4302,6 +4615,10 @@ def test_get_server_config(transport: str = "grpc"): assert response.valid_master_versions == ["valid_master_versions_value"] +def test_get_server_config_from_dict(): + test_get_server_config(request_type=dict) + + @pytest.mark.asyncio async def test_get_server_config_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4424,7 +4741,9 @@ def test_get_server_config_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -4465,7 +4784,9 @@ async def test_get_server_config_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" @@ -4483,14 +4804,16 @@ async def test_get_server_config_flattened_error_async(): ) -def test_list_node_pools(transport: str = "grpc"): +def test_list_node_pools( + transport: str = "grpc", request_type=cluster_service.ListNodePoolsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListNodePoolsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_node_pools), "__call__") as call: @@ -4503,12 +4826,16 @@ def test_list_node_pools(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListNodePoolsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ListNodePoolsResponse) +def test_list_node_pools_from_dict(): + test_list_node_pools(request_type=dict) + + @pytest.mark.asyncio async def test_list_node_pools_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4613,8 +4940,11 @@ def test_list_node_pools_flattened(): # request object values. 
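# Note the asymmetry kept throughout: the sync tests assert
# `len(call.mock_calls) == 1`, while the async variants assert only
# truthiness, presumably because the extra wrapping around async stubs makes
# the exact mock-call count less stable. A minimal sketch of awaiting a
# mocked unary stub:

import asyncio
from unittest import mock


def test_awaited_stub_records_call():
    async def main():
        stub = mock.Mock()
        fut = asyncio.get_running_loop().create_future()
        fut.set_result("response")
        stub.return_value = fut

        response = await stub("request")
        assert response == "response"
        # Mirror the async tests: require that the stub was called at all,
        # without pinning the exact count.
        assert len(stub.mock_calls)

    asyncio.run(main())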
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -4658,8 +4988,11 @@ async def test_list_node_pools_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -4678,14 +5011,16 @@ async def test_list_node_pools_flattened_error_async(): ) -def test_get_node_pool(transport: str = "grpc"): +def test_get_node_pool( + transport: str = "grpc", request_type=cluster_service.GetNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.GetNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.get_node_pool), "__call__") as call: @@ -4707,7 +5042,7 @@ def test_get_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.GetNodePoolRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.NodePool) @@ -4729,6 +5064,10 @@ def test_get_node_pool(transport: str = "grpc"): assert response.pod_ipv4_cidr_size == 1856 +def test_get_node_pool_from_dict(): + test_get_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_get_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -4859,9 +5198,13 @@ def test_get_node_pool_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" @@ -4907,9 +5250,13 @@ async def test_get_node_pool_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" @@ -4929,14 +5276,16 @@ async def test_get_node_pool_flattened_error_async(): ) -def test_create_node_pool(transport: str = "grpc"): +def test_create_node_pool( + transport: str = "grpc", request_type=cluster_service.CreateNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CreateNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -4963,7 +5312,7 @@ def test_create_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CreateNodePoolRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -4991,6 +5340,10 @@ def test_create_node_pool(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_create_node_pool_from_dict(): + test_create_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_create_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5134,9 +5487,13 @@ def test_create_node_pool_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") @@ -5182,9 +5539,13 @@ async def test_create_node_pool_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool == cluster_service.NodePool(name="name_value") @@ -5204,14 +5565,16 @@ async def test_create_node_pool_flattened_error_async(): ) -def test_delete_node_pool(transport: str = "grpc"): +def test_delete_node_pool( + transport: str = "grpc", request_type=cluster_service.DeleteNodePoolRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.DeleteNodePoolRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5238,7 +5601,7 @@ def test_delete_node_pool(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.DeleteNodePoolRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -5266,6 +5629,10 @@ def test_delete_node_pool(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_delete_node_pool_from_dict(): + test_delete_node_pool(request_type=dict) + + @pytest.mark.asyncio async def test_delete_node_pool_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5409,9 +5776,13 @@ def test_delete_node_pool_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" @@ -5457,9 +5828,13 @@ async def test_delete_node_pool_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" @@ -5479,14 +5854,16 @@ async def test_delete_node_pool_flattened_error_async(): ) -def test_rollback_node_pool_upgrade(transport: str = "grpc"): +def test_rollback_node_pool_upgrade( + transport: str = "grpc", request_type=cluster_service.RollbackNodePoolUpgradeRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.RollbackNodePoolUpgradeRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5513,7 +5890,7 @@ def test_rollback_node_pool_upgrade(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -5541,6 +5918,10 @@ def test_rollback_node_pool_upgrade(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_rollback_node_pool_upgrade_from_dict(): + test_rollback_node_pool_upgrade(request_type=dict) + + @pytest.mark.asyncio async def test_rollback_node_pool_upgrade_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5684,9 +6065,13 @@ def test_rollback_node_pool_upgrade_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" @@ -5732,9 +6117,13 @@ async def test_rollback_node_pool_upgrade_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" @@ -5754,14 +6143,16 @@ async def test_rollback_node_pool_upgrade_flattened_error_async(): ) -def test_set_node_pool_management(transport: str = "grpc"): +def test_set_node_pool_management( + transport: str = "grpc", request_type=cluster_service.SetNodePoolManagementRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNodePoolManagementRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5788,7 +6179,7 @@ def test_set_node_pool_management(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNodePoolManagementRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -5816,6 +6207,10 @@ def test_set_node_pool_management(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_node_pool_management_from_dict(): + test_set_node_pool_management(request_type=dict) + + @pytest.mark.asyncio async def test_set_node_pool_management_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -5960,10 +6355,15 @@ def test_set_node_pool_management_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True) @@ -6011,10 +6411,15 @@ async def test_set_node_pool_management_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].node_pool_id == "node_pool_id_value" + assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True) @@ -6035,14 +6440,16 @@ async def test_set_node_pool_management_flattened_error_async(): ) -def test_set_labels(transport: str = "grpc"): +def test_set_labels( + transport: str = "grpc", request_type=cluster_service.SetLabelsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLabelsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_labels), "__call__") as call: @@ -6067,7 +6474,7 @@ def test_set_labels(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLabelsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6095,6 +6502,10 @@ def test_set_labels(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_labels_from_dict(): + test_set_labels(request_type=dict) + + @pytest.mark.asyncio async def test_set_labels_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6235,10 +6646,15 @@ def test_set_labels_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].resource_labels == {"key_value": "value_value"} + assert args[0].label_fingerprint == "label_fingerprint_value" @@ -6286,10 +6702,15 @@ async def test_set_labels_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].resource_labels == {"key_value": "value_value"} + assert args[0].label_fingerprint == "label_fingerprint_value" @@ -6310,14 +6731,16 @@ async def test_set_labels_flattened_error_async(): ) -def test_set_legacy_abac(transport: str = "grpc"): +def test_set_legacy_abac( + transport: str = "grpc", request_type=cluster_service.SetLegacyAbacRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetLegacyAbacRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.set_legacy_abac), "__call__") as call: @@ -6342,7 +6765,7 @@ def test_set_legacy_abac(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetLegacyAbacRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6370,6 +6793,10 @@ def test_set_legacy_abac(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_legacy_abac_from_dict(): + test_set_legacy_abac(request_type=dict) + + @pytest.mark.asyncio async def test_set_legacy_abac_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6509,9 +6936,13 @@ def test_set_legacy_abac_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True @@ -6557,9 +6988,13 @@ async def test_set_legacy_abac_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].enabled == True @@ -6579,14 +7014,16 @@ async def test_set_legacy_abac_flattened_error_async(): ) -def test_start_ip_rotation(transport: str = "grpc"): +def test_start_ip_rotation( + transport: str = "grpc", request_type=cluster_service.StartIPRotationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.StartIPRotationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -6613,7 +7050,7 @@ def test_start_ip_rotation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.StartIPRotationRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, cluster_service.Operation) @@ -6641,6 +7078,10 @@ def test_start_ip_rotation(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_start_ip_rotation_from_dict(): + test_start_ip_rotation(request_type=dict) + + @pytest.mark.asyncio async def test_start_ip_rotation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -6783,8 +7224,11 @@ def test_start_ip_rotation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -6828,8 +7272,11 @@ async def test_start_ip_rotation_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -6848,14 +7295,16 @@ async def test_start_ip_rotation_flattened_error_async(): ) -def test_complete_ip_rotation(transport: str = "grpc"): +def test_complete_ip_rotation( + transport: str = "grpc", request_type=cluster_service.CompleteIPRotationRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.CompleteIPRotationRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -6882,7 +7331,7 @@ def test_complete_ip_rotation(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.CompleteIPRotationRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -6910,6 +7359,10 @@ def test_complete_ip_rotation(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_complete_ip_rotation_from_dict(): + test_complete_ip_rotation(request_type=dict) + + @pytest.mark.asyncio async def test_complete_ip_rotation_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7052,8 +7505,11 @@ def test_complete_ip_rotation_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -7097,8 +7553,11 @@ async def test_complete_ip_rotation_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" @@ -7117,14 +7576,16 @@ async def test_complete_ip_rotation_flattened_error_async(): ) -def test_set_node_pool_size(transport: str = "grpc"): +def test_set_node_pool_size( + transport: str = "grpc", request_type=cluster_service.SetNodePoolSizeRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. 
- request = cluster_service.SetNodePoolSizeRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7151,7 +7612,7 @@ def test_set_node_pool_size(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNodePoolSizeRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -7179,6 +7640,10 @@ def test_set_node_pool_size(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_node_pool_size_from_dict(): + test_set_node_pool_size(request_type=dict) + + @pytest.mark.asyncio async def test_set_node_pool_size_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7299,14 +7764,16 @@ async def test_set_node_pool_size_field_headers_async(): assert ("x-goog-request-params", "name=name/value",) in kw["metadata"] -def test_set_network_policy(transport: str = "grpc"): +def test_set_network_policy( + transport: str = "grpc", request_type=cluster_service.SetNetworkPolicyRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetNetworkPolicyRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7333,7 +7800,7 @@ def test_set_network_policy(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetNetworkPolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -7361,6 +7828,10 @@ def test_set_network_policy(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_network_policy_from_dict(): + test_set_network_policy(request_type=dict) + + @pytest.mark.asyncio async def test_set_network_policy_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7506,9 +7977,13 @@ def test_set_network_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( provider=cluster_service.NetworkPolicy.Provider.CALICO ) @@ -7560,9 +8035,13 @@ async def test_set_network_policy_flattened_async(): # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].network_policy == cluster_service.NetworkPolicy( provider=cluster_service.NetworkPolicy.Provider.CALICO ) @@ -7586,14 +8065,16 @@ async def test_set_network_policy_flattened_error_async(): ) -def test_set_maintenance_policy(transport: str = "grpc"): +def test_set_maintenance_policy( + transport: str = "grpc", request_type=cluster_service.SetMaintenancePolicyRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.SetMaintenancePolicyRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7620,7 +8101,7 @@ def test_set_maintenance_policy(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.SetMaintenancePolicyRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.Operation) @@ -7648,6 +8129,10 @@ def test_set_maintenance_policy(transport: str = "grpc"): assert response.end_time == "end_time_value" +def test_set_maintenance_policy_from_dict(): + test_set_maintenance_policy(request_type=dict) + + @pytest.mark.asyncio async def test_set_maintenance_policy_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -7797,9 +8282,13 @@ def test_set_maintenance_policy_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( window=cluster_service.MaintenanceWindow( daily_maintenance_window=cluster_service.DailyMaintenanceWindow( @@ -7863,9 +8352,13 @@ async def test_set_maintenance_policy_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].project_id == "project_id_value" + assert args[0].zone == "zone_value" + assert args[0].cluster_id == "cluster_id_value" + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy( window=cluster_service.MaintenanceWindow( daily_maintenance_window=cluster_service.DailyMaintenanceWindow( @@ -7897,14 +8390,16 @@ async def test_set_maintenance_policy_flattened_error_async(): ) -def test_list_usable_subnetworks(transport: str = "grpc"): +def test_list_usable_subnetworks( + transport: str = "grpc", request_type=cluster_service.ListUsableSubnetworksRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListUsableSubnetworksRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( @@ -7921,7 +8416,7 @@ def test_list_usable_subnetworks(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListUsableSubnetworksRequest() # Establish that the response is the type that we expect. assert isinstance(response, pagers.ListUsableSubnetworksPager) @@ -7929,6 +8424,10 @@ def test_list_usable_subnetworks(transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" +def test_list_usable_subnetworks_from_dict(): + test_list_usable_subnetworks(request_type=dict) + + @pytest.mark.asyncio async def test_list_usable_subnetworks_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -8037,6 +8536,7 @@ def test_list_usable_subnetworks_flattened(): # request object values. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" @@ -8073,6 +8573,7 @@ async def test_list_usable_subnetworks_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" @@ -8167,8 +8668,8 @@ def test_list_usable_subnetworks_pages(): RuntimeError, ) pages = list(client.list_usable_subnetworks(request={}).pages) - for page, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page.raw_page.next_page_token == token + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token @pytest.mark.asyncio @@ -8252,20 +8753,22 @@ async def test_list_usable_subnetworks_async_pages(): RuntimeError, ) pages = [] - async for page in (await client.list_usable_subnetworks(request={})).pages: - pages.append(page) - for page, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page.raw_page.next_page_token == token + async for page_ in (await client.list_usable_subnetworks(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token -def test_list_locations(transport: str = "grpc"): +def test_list_locations( + transport: str = "grpc", request_type=cluster_service.ListLocationsRequest +): client = ClusterManagerClient( credentials=credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. - request = cluster_service.ListLocationsRequest() + request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client._transport.list_locations), "__call__") as call: @@ -8280,7 +8783,7 @@ def test_list_locations(transport: str = "grpc"): assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - assert args[0] == request + assert args[0] == cluster_service.ListLocationsRequest() # Establish that the response is the type that we expect. assert isinstance(response, cluster_service.ListLocationsResponse) @@ -8288,6 +8791,10 @@ def test_list_locations(transport: str = "grpc"): assert response.next_page_token == "next_page_token_value" +def test_list_locations_from_dict(): + test_list_locations(request_type=dict) + + @pytest.mark.asyncio async def test_list_locations_async(transport: str = "grpc_asyncio"): client = ClusterManagerAsyncClient( @@ -8392,6 +8899,7 @@ def test_list_locations_flattened(): # request object values. 
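# The pager hunks above only rename the loop variable `page` to `page_`; the
# iteration contract is unchanged: one page per backend response, each
# carrying the token that fetched it, with "" marking the last page. A sketch
# with hypothetical page objects:

class RawPage:
    def __init__(self, next_page_token):
        self.next_page_token = next_page_token


class Page:
    def __init__(self, token):
        self.raw_page = RawPage(token)


def test_page_tokens_line_up():
    pages = [Page(token) for token in ["abc", "def", "ghi", ""]]
    for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
        assert page_.raw_page.next_page_token == token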
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" @@ -8428,6 +8936,7 @@ async def test_list_locations_flattened_async(): # request object values. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] + assert args[0].parent == "parent_value" @@ -8497,6 +9006,21 @@ def test_transport_get_channel(): assert channel +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(auth, "default") as adc: + adc.return_value = (credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = ClusterManagerClient(credentials=credentials.AnonymousCredentials(),) @@ -8514,9 +9038,13 @@ def test_cluster_manager_base_transport_error(): def test_cluster_manager_base_transport(): # Instantiate the base transport. - transport = transports.ClusterManagerTransport( - credentials=credentials.AnonymousCredentials(), - ) + with mock.patch( + "google.cloud.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.ClusterManagerTransport( + credentials=credentials.AnonymousCredentials(), + ) # Every method on the transport should just blindly # raise NotImplementedError. @@ -8561,24 +9089,42 @@ def test_cluster_manager_base_transport(): def test_cluster_manager_base_transport_with_credentials_file(): # Instantiate the base transport with a credentials file - with mock.patch.object(auth, "load_credentials_from_file") as load_creds: + with mock.patch.object( + auth, "load_credentials_from_file" + ) as load_creds, mock.patch( + "google.cloud.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None load_creds.return_value = (credentials.AnonymousCredentials(), None) transport = transports.ClusterManagerTransport( - credentials_file="credentials.json", + credentials_file="credentials.json", quota_project_id="octopus", ) load_creds.assert_called_once_with( "credentials.json", scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", ) +def test_cluster_manager_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(auth, "default") as adc, mock.patch( + "google.cloud.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport() + adc.assert_called_once() + + def test_cluster_manager_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(auth, "default") as adc: adc.return_value = (credentials.AnonymousCredentials(), None) ClusterManagerClient() adc.assert_called_once_with( - scopes=("https://www.googleapis.com/auth/cloud-platform",) + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, ) @@ -8587,9 +9133,12 @@ def test_cluster_manager_transport_auth_adc(): # ADC credentials. 
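# The base-transport tests now patch the transport's `__init__` (or its
# `_prep_wrapped_messages` hook) to a no-op so the abstract class can be
# instantiated without real auth wiring before checking that every RPC stub
# raises NotImplementedError. A generic sketch of that trick:

import pytest
from unittest import mock


class BaseTransport:
    def __init__(self, credentials=None):
        # Imagine credential plumbing here that a unit test shouldn't run.
        raise RuntimeError("real wiring unavailable under test")

    def get_cluster(self, request):
        raise NotImplementedError()


def test_base_transport_methods_unimplemented():
    with mock.patch.object(BaseTransport, "__init__", return_value=None):
        transport = BaseTransport(credentials=mock.sentinel.creds)
    with pytest.raises(NotImplementedError):
        transport.get_cluster(request=None)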
with mock.patch.object(auth, "default") as adc: adc.return_value = (credentials.AnonymousCredentials(), None) - transports.ClusterManagerGrpcTransport(host="squid.clam.whelk") + transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", quota_project_id="octopus" + ) adc.assert_called_once_with( - scopes=("https://www.googleapis.com/auth/cloud-platform",) + scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", ) @@ -8616,172 +9165,128 @@ def test_cluster_manager_host_with_port(): def test_cluster_manager_grpc_transport_channel(): channel = grpc.insecure_channel("http://localhost/") - # Check that if channel is provided, mtls endpoint and client_cert_source - # won't be used. - callback = mock.MagicMock() + # Check that channel is used if provided. transport = transports.ClusterManagerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=callback, + host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" - assert not callback.called def test_cluster_manager_grpc_asyncio_transport_channel(): channel = aio.insecure_channel("http://localhost/") - # Check that if channel is provided, mtls endpoint and client_cert_source - # won't be used. - callback = mock.MagicMock() + # Check that channel is used if provided. transport = transports.ClusterManagerGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=callback, + host="squid.clam.whelk", channel=channel, ) assert transport.grpc_channel == channel assert transport._host == "squid.clam.whelk:443" - assert not callback.called - - -@mock.patch("grpc.ssl_channel_credentials", autospec=True) -@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True) -def test_cluster_manager_grpc_transport_channel_mtls_with_client_cert_source( - grpc_create_channel, grpc_ssl_channel_cred -): - # Check that if channel is None, but api_mtls_endpoint and client_cert_source - # are provided, then a mTLS channel will be created. 
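# The `quota_project_id` now asserted throughout tracks the updated
# google-auth surface: `google.auth.default()` and
# `google.auth.load_credentials_from_file()` both accept it, and the
# transports are expected to forward it verbatim. A minimal forwarding check
# around a hypothetical credential bootstrap:

from unittest import mock

from google import auth
from google.auth import credentials


def bootstrap_creds(credentials_file, scopes, quota_project_id=None):
    # Hypothetical stand-in for the transport's credential setup.
    creds, _ = auth.load_credentials_from_file(
        credentials_file, scopes=scopes, quota_project_id=quota_project_id
    )
    return creds


def test_quota_project_id_forwarded():
    with mock.patch.object(auth, "load_credentials_from_file") as load_creds:
        load_creds.return_value = (credentials.AnonymousCredentials(), None)
        bootstrap_creds("credentials.json", scopes=(), quota_project_id="octopus")
        load_creds.assert_called_once_with(
            "credentials.json", scopes=(), quota_project_id="octopus"
        )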
-
-
-@mock.patch("grpc.ssl_channel_credentials", autospec=True)
-@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
-def test_cluster_manager_grpc_transport_channel_mtls_with_client_cert_source(
-    grpc_create_channel, grpc_ssl_channel_cred
-):
-    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
-    # are provided, then a mTLS channel will be created.
-    mock_cred = mock.Mock()
-
-    mock_ssl_cred = mock.Mock()
-    grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-    mock_grpc_channel = mock.Mock()
-    grpc_create_channel.return_value = mock_grpc_channel
-
-    transport = transports.ClusterManagerGrpcTransport(
-        host="squid.clam.whelk",
-        credentials=mock_cred,
-        api_mtls_endpoint="mtls.squid.clam.whelk",
-        client_cert_source=client_cert_source_callback,
-    )
-    grpc_ssl_channel_cred.assert_called_once_with(
-        certificate_chain=b"cert bytes", private_key=b"key bytes"
-    )
-    grpc_create_channel.assert_called_once_with(
-        "mtls.squid.clam.whelk:443",
-        credentials=mock_cred,
-        credentials_file=None,
-        scopes=("https://www.googleapis.com/auth/cloud-platform",),
-        ssl_credentials=mock_ssl_cred,
-    )
-    assert transport.grpc_channel == mock_grpc_channel


-@mock.patch("grpc.ssl_channel_credentials", autospec=True)
-@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
-def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
-    grpc_create_channel, grpc_ssl_channel_cred
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.ClusterManagerGrpcTransport,
+        transports.ClusterManagerGrpcAsyncIOTransport,
+    ],
+)
+def test_cluster_manager_transport_channel_mtls_with_client_cert_source(
+    transport_class,
 ):
-    # Check that if channel is None, but api_mtls_endpoint and client_cert_source
-    # are provided, then a mTLS channel will be created.
-    mock_cred = mock.Mock()
-
-    mock_ssl_cred = mock.Mock()
-    grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-    mock_grpc_channel = mock.Mock()
-    grpc_create_channel.return_value = mock_grpc_channel
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()

-    transport = transports.ClusterManagerGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        credentials=mock_cred,
-        api_mtls_endpoint="mtls.squid.clam.whelk",
-        client_cert_source=client_cert_source_callback,
-    )
-    grpc_ssl_channel_cred.assert_called_once_with(
-        certificate_chain=b"cert bytes", private_key=b"key bytes"
-    )
-    grpc_create_channel.assert_called_once_with(
-        "mtls.squid.clam.whelk:443",
-        credentials=mock_cred,
-        credentials_file=None,
-        scopes=("https://www.googleapis.com/auth/cloud-platform",),
-        ssl_credentials=mock_ssl_cred,
-    )
-    assert transport.grpc_channel == mock_grpc_channel
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=("https://www.googleapis.com/auth/cloud-platform",),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
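The parametrized replacement keeps the old mTLS arguments working but now expects a DeprecationWarning, since the regenerated library prefers the new mTLS environment configuration. A sketch of the deprecated path the test covers; the cert/key bytes mirror the tests' dummy values, where a real callback would return PEM material:

    def client_cert_source_callback():
        # Must return a (certificate_chain, private_key) tuple of bytes.
        return b"cert bytes", b"key bytes"

    # Still accepted, but warns: channel creation goes through the
    # transport's own create_channel() with ssl_credentials built from
    # the callback.
    transport = transports.ClusterManagerGrpcTransport(
        host="squid.clam.whelk",
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=client_cert_source_callback,
    )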
 @pytest.mark.parametrize(
-    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
+    "transport_class",
+    [
+        transports.ClusterManagerGrpcTransport,
+        transports.ClusterManagerGrpcAsyncIOTransport,
+    ],
 )
-@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
-def test_cluster_manager_grpc_transport_channel_mtls_with_adc(
-    grpc_create_channel, api_mtls_endpoint
-):
-    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
-    # is provided, then a mTLS channel will be created with SSL ADC.
-    mock_grpc_channel = mock.Mock()
-    grpc_create_channel.return_value = mock_grpc_channel
-
-    # Mock google.auth.transport.grpc.SslCredentials class.
+def test_cluster_manager_transport_channel_mtls_with_adc(transport_class):
     mock_ssl_cred = mock.Mock()
     with mock.patch.multiple(
         "google.auth.transport.grpc.SslCredentials",
         __init__=mock.Mock(return_value=None),
         ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
     ):
-        mock_cred = mock.Mock()
-        transport = transports.ClusterManagerGrpcTransport(
-            host="squid.clam.whelk",
-            credentials=mock_cred,
-            api_mtls_endpoint=api_mtls_endpoint,
-            client_cert_source=None,
-        )
-        grpc_create_channel.assert_called_once_with(
-            "mtls.squid.clam.whelk:443",
-            credentials=mock_cred,
-            credentials_file=None,
-            scopes=("https://www.googleapis.com/auth/cloud-platform",),
-            ssl_credentials=mock_ssl_cred,
-        )
-        assert transport.grpc_channel == mock_grpc_channel
+        with mock.patch.object(
+            transport_class, "create_channel", autospec=True
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=("https://www.googleapis.com/auth/cloud-platform",),
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+            )
+            assert transport.grpc_channel == mock_grpc_channel
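When api_mtls_endpoint is set but client_cert_source is None, the test above confirms the client certificate instead comes from google.auth.transport.grpc.SslCredentials (SSL ADC). Sketched below under that assumption; this path is likewise deprecated and warns:

    # Falls back to the machine's default client certificate via
    # SslCredentials; raises a DeprecationWarning in the regenerated library.
    transport = transports.ClusterManagerGrpcTransport(
        host="squid.clam.whelk",
        api_mtls_endpoint="mtls.squid.clam.whelk",
        client_cert_source=None,
    )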
-@pytest.mark.parametrize(
-    "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
-)
-@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
-def test_cluster_manager_grpc_asyncio_transport_channel_mtls_with_adc(
-    grpc_create_channel, api_mtls_endpoint
-):
-    # Check that if channel and client_cert_source are None, but api_mtls_endpoint
-    # is provided, then a mTLS channel will be created with SSL ADC.
-    mock_grpc_channel = mock.Mock()
-    grpc_create_channel.return_value = mock_grpc_channel
-    # Mock google.auth.transport.grpc.SslCredentials class.
-    mock_ssl_cred = mock.Mock()
-    with mock.patch.multiple(
-        "google.auth.transport.grpc.SslCredentials",
-        __init__=mock.Mock(return_value=None),
-        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
-    ):
-        mock_cred = mock.Mock()
-        transport = transports.ClusterManagerGrpcAsyncIOTransport(
-            host="squid.clam.whelk",
-            credentials=mock_cred,
-            api_mtls_endpoint=api_mtls_endpoint,
-            client_cert_source=None,
+def test_client_withDEFAULT_CLIENT_INFO():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(
+        transports.ClusterManagerTransport, "_prep_wrapped_messages"
+    ) as prep:
+        client = ClusterManagerClient(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
         )
-        grpc_create_channel.assert_called_once_with(
-            "mtls.squid.clam.whelk:443",
-            credentials=mock_cred,
-            credentials_file=None,
-            scopes=("https://www.googleapis.com/auth/cloud-platform",),
-            ssl_credentials=mock_ssl_cred,
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(
+        transports.ClusterManagerTransport, "_prep_wrapped_messages"
+    ) as prep:
+        transport_class = ClusterManagerClient.get_transport_class()
+        transport = transport_class(
+            credentials=credentials.AnonymousCredentials(), client_info=client_info,
         )
-        assert transport.grpc_channel == mock_grpc_channel
+        prep.assert_called_once_with(client_info)
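The new client_info test verifies that a custom ClientInfo object reaches the transport's _prep_wrapped_messages, which applies per-method metadata. A minimal sketch of supplying one; the user_agent value is illustrative, not part of the patch:

    from google.api_core import gapic_v1
    from google.auth import credentials as ga_credentials
    from google.cloud.container_v1beta1.services.cluster_manager import (
        ClusterManagerClient,
    )

    # Identify the calling application in request metadata.
    client_info = gapic_v1.client_info.ClientInfo(user_agent="my-app/1.0")
    client = ClusterManagerClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_info=client_info,
    )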