diff --git a/.flake8 b/.flake8
index d1134d7..ed93163 100644
--- a/.flake8
+++ b/.flake8
@@ -16,7 +16,7 @@
# Generated by synthtool. DO NOT EDIT!
[flake8]
-ignore = E203, E266, E501, W503, F401, F841
+ignore = E203, E266, E501, W503
exclude =
# Exclude generated code.
**/proto/**
diff --git a/.github/snippet-bot.yml b/.github/snippet-bot.yml
new file mode 100644
index 0000000..e69de29
diff --git a/.gitignore b/.gitignore
index b87e1ed..b9daa52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -46,6 +46,7 @@ pip-log.txt
# Built documentation
docs/_build
bigquery/docs/generated
+docs.metadata
# Virtual environment
env/
@@ -57,4 +58,4 @@ system_tests/local_test_setup
# Make sure a generated file isn't accidentally committed.
pylintrc
-pylintrc.test
\ No newline at end of file
+pylintrc.test
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index d31d3af..a4d6015 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation
python3.6 -m pip install --upgrade --quiet nox
python3.6 -m nox --version
-python3.6 -m nox
+# If NOX_SESSION is set, it only runs the specified session,
+# otherwise run all the sessions.
+if [[ -n "${NOX_SESSION:-}" ]]; then
+ python3.6 -m nox -s "${NOX_SESSION:-}"
+else
+ python3.6 -m nox
+fi
diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile
new file mode 100644
index 0000000..412b0b5
--- /dev/null
+++ b/.kokoro/docker/docs/Dockerfile
@@ -0,0 +1,98 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:20.04
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Ensure local Python is preferred over distribution Python.
+ENV PATH /usr/local/bin:$PATH
+
+# Install dependencies.
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends \
+ apt-transport-https \
+ build-essential \
+ ca-certificates \
+ curl \
+ dirmngr \
+ git \
+ gpg-agent \
+ graphviz \
+ libbz2-dev \
+ libdb5.3-dev \
+ libexpat1-dev \
+ libffi-dev \
+ liblzma-dev \
+ libreadline-dev \
+ libsnappy-dev \
+ libssl-dev \
+ libsqlite3-dev \
+ portaudio19-dev \
+ redis-server \
+ software-properties-common \
+ ssh \
+ sudo \
+ tcl \
+ tcl-dev \
+ tk \
+ tk-dev \
+ uuid-dev \
+ wget \
+ zlib1g-dev \
+ && add-apt-repository universe \
+ && apt-get update \
+ && apt-get -y install jq \
+ && apt-get clean autoclean \
+ && apt-get autoremove -y \
+ && rm -rf /var/lib/apt/lists/* \
+ && rm -f /var/cache/apt/archives/*.deb
+
+
+COPY fetch_gpg_keys.sh /tmp
+# Install the desired versions of Python.
+RUN set -ex \
+ && export GNUPGHOME="$(mktemp -d)" \
+ && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \
+ && /tmp/fetch_gpg_keys.sh \
+ && for PYTHON_VERSION in 3.7.8 3.8.5; do \
+ wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \
+ && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \
+ && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \
+ && rm -r python-${PYTHON_VERSION}.tar.xz.asc \
+ && mkdir -p /usr/src/python-${PYTHON_VERSION} \
+ && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \
+ && rm python-${PYTHON_VERSION}.tar.xz \
+ && cd /usr/src/python-${PYTHON_VERSION} \
+ && ./configure \
+ --enable-shared \
+ # This works only on Python 2.7 and throws a warning on every other
+ # version, but seems otherwise harmless.
+ --enable-unicode=ucs4 \
+ --with-system-ffi \
+ --without-ensurepip \
+ && make -j$(nproc) \
+ && make install \
+ && ldconfig \
+ ; done \
+ && rm -rf "${GNUPGHOME}" \
+ && rm -rf /usr/src/python* \
+ && rm -rf ~/.cache/
+
+RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \
+ && python3.7 /tmp/get-pip.py \
+ && python3.8 /tmp/get-pip.py \
+ && rm /tmp/get-pip.py
+
+CMD ["python3.7"]
diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh
new file mode 100755
index 0000000..d653dd8
--- /dev/null
+++ b/.kokoro/docker/docs/fetch_gpg_keys.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A script to fetch gpg keys with retry.
+# Avoid jinja parsing the file.
+#
+
+function retry {
+ if [[ "${#}" -le 1 ]]; then
+ echo "Usage: ${0} retry_count commands.."
+ exit 1
+ fi
+ local retries=${1}
+ local command="${@:2}"
+ until [[ "${retries}" -le 0 ]]; do
+ $command && return 0
+ if [[ $? -ne 0 ]]; then
+ echo "command failed, retrying"
+ ((retries--))
+ fi
+ done
+ return 1
+}
+
+# 3.6.9, 3.7.5 (Ned Deily)
+retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \
+ 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D
+
+# 3.8.0 (Łukasz Langa)
+retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \
+ E3FF2839C048B25C084DEBE9B26995E310250568
+
+#
diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg
index a9c6a0b..b789505 100644
--- a/.kokoro/docs/common.cfg
+++ b/.kokoro/docs/common.cfg
@@ -11,12 +11,12 @@ action {
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-os-config/.kokoro/trampoline.sh"
+build_file: "python-os-config/.kokoro/trampoline_v2.sh"
# Configure the docker image for kokoro-trampoline.
env_vars: {
key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-kokoro-resources/python-multi"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs"
}
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
@@ -28,6 +28,23 @@ env_vars: {
value: "docs-staging"
}
+env_vars: {
+ key: "V2_STAGING_BUCKET"
+ value: "docs-staging-v2"
+}
+
+# It will upload the docker image after successful builds.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE_UPLOAD"
+ value: "true"
+}
+
+# It will always build the docker image.
+env_vars: {
+ key: "TRAMPOLINE_DOCKERFILE"
+ value: ".kokoro/docker/docs/Dockerfile"
+}
+
# Fetch the token needed for reporting release status to GitHub
before_action {
fetch_keystore {
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
new file mode 100644
index 0000000..1118107
--- /dev/null
+++ b/.kokoro/docs/docs-presubmit.cfg
@@ -0,0 +1,17 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "STAGING_BUCKET"
+ value: "gcloud-python-test"
+}
+
+env_vars: {
+ key: "V2_STAGING_BUCKET"
+ value: "gcloud-python-test"
+}
+
+# We only upload the image in the main `docs` build.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE_UPLOAD"
+ value: "false"
+}
diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh
new file mode 100755
index 0000000..f525142
--- /dev/null
+++ b/.kokoro/populate-secrets.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2020 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;}
+function msg { println "$*" >&2 ;}
+function println { printf '%s\n' "$(now) $*" ;}
+
+
+# Populates requested secrets set in SECRET_MANAGER_KEYS from service account:
+# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com
+SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager"
+msg "Creating folder on disk for secrets: ${SECRET_LOCATION}"
+mkdir -p ${SECRET_LOCATION}
+for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g")
+do
+ msg "Retrieving secret ${key}"
+ docker run --entrypoint=gcloud \
+ --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \
+ gcr.io/google.com/cloudsdktool/cloud-sdk \
+ secrets versions access latest \
+ --project cloud-devrel-kokoro-resources \
+ --secret ${key} > \
+ "${SECRET_LOCATION}/${key}"
+ if [[ $? == 0 ]]; then
+ msg "Secret written to ${SECRET_LOCATION}/${key}"
+ else
+ msg "Error retrieving secret ${key}"
+ fi
+done
diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh
index bfa9f2a..8acb14e 100755
--- a/.kokoro/publish-docs.sh
+++ b/.kokoro/publish-docs.sh
@@ -18,26 +18,16 @@ set -eo pipefail
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
-cd github/python-os-config
-
-# Remove old nox
-python3.6 -m pip uninstall --yes --quiet nox-automation
+export PATH="${HOME}/.local/bin:${PATH}"
# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-python3.6 -m nox --version
+python3 -m pip install --user --upgrade --quiet nox
+python3 -m nox --version
# build docs
nox -s docs
-python3 -m pip install gcp-docuploader
-
-# install a json parser
-sudo apt-get update
-sudo apt-get -y install software-properties-common
-sudo add-apt-repository universe
-sudo apt-get update
-sudo apt-get -y install jq
+python3 -m pip install --user gcp-docuploader
# create metadata
python3 -m docuploader create-metadata \
@@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \
cat docs.metadata
# upload docs
-python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging
+python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
+
+
+# docfx yaml files
+nox -s docfx
+
+# create metadata.
+python3 -m docuploader create-metadata \
+ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
+ --version=$(python3 setup.py --version) \
+ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
+ --distribution-name=$(python3 setup.py --name) \
+ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
+ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
+ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
+
+cat docs.metadata
+
+# upload docs
+python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
index ee248ab..cbc301b 100644
--- a/.kokoro/release/common.cfg
+++ b/.kokoro/release/common.cfg
@@ -23,42 +23,18 @@ env_vars: {
value: "github/python-os-config/.kokoro/release.sh"
}
-# Fetch the token needed for reporting release status to GitHub
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "yoshi-automation-github-key"
- }
- }
-}
-
-# Fetch PyPI password
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "google_cloud_pypi_password"
- }
- }
-}
-
-# Fetch magictoken to use with Magic Github Proxy
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "releasetool-magictoken"
- }
- }
+# Fetch PyPI password
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google_cloud_pypi_password"
+ }
+ }
}
-# Fetch api key to use with Magic Github Proxy
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "magic-github-proxy-api-key"
- }
- }
-}
+# Tokens needed to report release status back to GitHub
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg
index d633453..c6717a1 100644
--- a/.kokoro/samples/python3.6/common.cfg
+++ b/.kokoro/samples/python3.6/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.6"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py36"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-os-config/.kokoro/test-samples.sh"
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg
index 403367e..581207a 100644
--- a/.kokoro/samples/python3.7/common.cfg
+++ b/.kokoro/samples/python3.7/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.7"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py37"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-os-config/.kokoro/test-samples.sh"
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg
index a992d0f..193f553 100644
--- a/.kokoro/samples/python3.8/common.cfg
+++ b/.kokoro/samples/python3.8/common.cfg
@@ -13,6 +13,12 @@ env_vars: {
value: "py-3.8"
}
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py38"
+}
+
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
value: "github/python-os-config/.kokoro/test-samples.sh"
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index ac06883..0975531 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -28,6 +28,12 @@ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
git checkout $LATEST_RELEASE
fi
+# Exit early if samples directory doesn't exist
+if [ ! -d "./samples" ]; then
+  echo "No tests run. './samples' not found"
+ exit 0
+fi
+
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
@@ -101,4 +107,4 @@ cd "$ROOT"
# Workaround for Kokoro permissions issue: delete secrets
rm testing/{test-env.sh,client-secrets.json,service-account.json}
-exit "$RTN"
\ No newline at end of file
+exit "$RTN"
diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh
index e8c4251..f39236e 100755
--- a/.kokoro/trampoline.sh
+++ b/.kokoro/trampoline.sh
@@ -15,9 +15,14 @@
set -eo pipefail
-python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$?
+# Always run the cleanup script, regardless of the success of bouncing into
+# the container.
+function cleanup() {
+ chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ echo "cleanup";
+}
+trap cleanup EXIT
-chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
-${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true
-
-exit ${ret_code}
+$(dirname $0)/populate-secrets.sh # Secret Manager secrets.
+python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py"
\ No newline at end of file
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh
new file mode 100755
index 0000000..719bcd5
--- /dev/null
+++ b/.kokoro/trampoline_v2.sh
@@ -0,0 +1,487 @@
+#!/usr/bin/env bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# trampoline_v2.sh
+#
+# This script does 3 things.
+#
+# 1. Prepare the Docker image for the test
+# 2. Run the Docker with appropriate flags to run the test
+# 3. Upload the newly built Docker image
+#
+# in a way that is somewhat compatible with trampoline_v1.
+#
+# To run this script, first download few files from gcs to /dev/shm.
+# (/dev/shm is passed into the container as KOKORO_GFILE_DIR).
+#
+# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm
+# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
+#
+# Then run the script.
+# .kokoro/trampoline_v2.sh
+#
+# These environment variables are required:
+# TRAMPOLINE_IMAGE: The docker image to use.
+# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile.
+#
+# You can optionally change these environment variables:
+# TRAMPOLINE_IMAGE_UPLOAD:
+# (true|false): Whether to upload the Docker image after the
+# successful builds.
+# TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
+# TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
+# Defaults to /workspace.
+# Potentially there are some repo specific envvars in .trampolinerc in
+# the project root.
+
+
+set -euo pipefail
+
+TRAMPOLINE_VERSION="2.0.5"
+
+if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then
+ readonly IO_COLOR_RED="$(tput setaf 1)"
+ readonly IO_COLOR_GREEN="$(tput setaf 2)"
+ readonly IO_COLOR_YELLOW="$(tput setaf 3)"
+ readonly IO_COLOR_RESET="$(tput sgr0)"
+else
+ readonly IO_COLOR_RED=""
+ readonly IO_COLOR_GREEN=""
+ readonly IO_COLOR_YELLOW=""
+ readonly IO_COLOR_RESET=""
+fi
+
+function function_exists {
+ [ $(LC_ALL=C type -t $1)"" == "function" ]
+}
+
+# Logs a message using the given color. The first argument must be one
+# of the IO_COLOR_* variables defined above, such as
+# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the
+# given color. The log message will also have an RFC-3339 timestamp
+# prepended (in UTC). You can disable the color output by setting
+# TERM=vt100.
+function log_impl() {
+ local color="$1"
+ shift
+ local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")"
+ echo "================================================================"
+ echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}"
+ echo "================================================================"
+}
+
+# Logs the given message with normal coloring and a timestamp.
+function log() {
+ log_impl "${IO_COLOR_RESET}" "$@"
+}
+
+# Logs the given message in green with a timestamp.
+function log_green() {
+ log_impl "${IO_COLOR_GREEN}" "$@"
+}
+
+# Logs the given message in yellow with a timestamp.
+function log_yellow() {
+ log_impl "${IO_COLOR_YELLOW}" "$@"
+}
+
+# Logs the given message in red with a timestamp.
+function log_red() {
+ log_impl "${IO_COLOR_RED}" "$@"
+}
+
+readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX)
+readonly tmphome="${tmpdir}/h"
+mkdir -p "${tmphome}"
+
+function cleanup() {
+ rm -rf "${tmpdir}"
+}
+trap cleanup EXIT
+
+RUNNING_IN_CI="${RUNNING_IN_CI:-false}"
+
+# The workspace in the container, defaults to /workspace.
+TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}"
+
+pass_down_envvars=(
+ # TRAMPOLINE_V2 variables.
+ # Tells scripts whether they are running as part of CI or not.
+ "RUNNING_IN_CI"
+ # Indicates which CI system we're in.
+ "TRAMPOLINE_CI"
+ # Indicates the version of the script.
+ "TRAMPOLINE_VERSION"
+)
+
+log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}"
+
+# Detect which CI systems we're in. If we're in any of the CI systems
+# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be
+# the name of the CI system. Both envvars will be passing down to the
+# container for telling which CI system we're in.
+if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then
+ # descriptive env var for indicating it's on CI.
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="kokoro"
+ if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then
+ if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then
+ log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting."
+ exit 1
+ fi
+ # This service account will be activated later.
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json"
+ else
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ gcloud auth list
+ fi
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+ fi
+ pass_down_envvars+=(
+ # KOKORO dynamic variables.
+ "KOKORO_BUILD_NUMBER"
+ "KOKORO_BUILD_ID"
+ "KOKORO_JOB_NAME"
+ "KOKORO_GIT_COMMIT"
+ "KOKORO_GITHUB_COMMIT"
+ "KOKORO_GITHUB_PULL_REQUEST_NUMBER"
+ "KOKORO_GITHUB_PULL_REQUEST_COMMIT"
+ # For Build Cop Bot
+ "KOKORO_GITHUB_COMMIT_URL"
+ "KOKORO_GITHUB_PULL_REQUEST_URL"
+ )
+elif [[ "${TRAVIS:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="travis"
+ pass_down_envvars+=(
+ "TRAVIS_BRANCH"
+ "TRAVIS_BUILD_ID"
+ "TRAVIS_BUILD_NUMBER"
+ "TRAVIS_BUILD_WEB_URL"
+ "TRAVIS_COMMIT"
+ "TRAVIS_COMMIT_MESSAGE"
+ "TRAVIS_COMMIT_RANGE"
+ "TRAVIS_JOB_NAME"
+ "TRAVIS_JOB_NUMBER"
+ "TRAVIS_JOB_WEB_URL"
+ "TRAVIS_PULL_REQUEST"
+ "TRAVIS_PULL_REQUEST_BRANCH"
+ "TRAVIS_PULL_REQUEST_SHA"
+ "TRAVIS_PULL_REQUEST_SLUG"
+ "TRAVIS_REPO_SLUG"
+ "TRAVIS_SECURE_ENV_VARS"
+ "TRAVIS_TAG"
+ )
+elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="github-workflow"
+ pass_down_envvars+=(
+ "GITHUB_WORKFLOW"
+ "GITHUB_RUN_ID"
+ "GITHUB_RUN_NUMBER"
+ "GITHUB_ACTION"
+ "GITHUB_ACTIONS"
+ "GITHUB_ACTOR"
+ "GITHUB_REPOSITORY"
+ "GITHUB_EVENT_NAME"
+ "GITHUB_EVENT_PATH"
+ "GITHUB_SHA"
+ "GITHUB_REF"
+ "GITHUB_HEAD_REF"
+ "GITHUB_BASE_REF"
+ )
+elif [[ "${CIRCLECI:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="circleci"
+ pass_down_envvars+=(
+ "CIRCLE_BRANCH"
+ "CIRCLE_BUILD_NUM"
+ "CIRCLE_BUILD_URL"
+ "CIRCLE_COMPARE_URL"
+ "CIRCLE_JOB"
+ "CIRCLE_NODE_INDEX"
+ "CIRCLE_NODE_TOTAL"
+ "CIRCLE_PREVIOUS_BUILD_NUM"
+ "CIRCLE_PROJECT_REPONAME"
+ "CIRCLE_PROJECT_USERNAME"
+ "CIRCLE_REPOSITORY_URL"
+ "CIRCLE_SHA1"
+ "CIRCLE_STAGE"
+ "CIRCLE_USERNAME"
+ "CIRCLE_WORKFLOW_ID"
+ "CIRCLE_WORKFLOW_JOB_ID"
+ "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS"
+ "CIRCLE_WORKFLOW_WORKSPACE_ID"
+ )
+fi
+
+# Configure the service account for pulling the docker image.
+function repo_root() {
+ local dir="$1"
+ while [[ ! -d "${dir}/.git" ]]; do
+ dir="$(dirname "$dir")"
+ done
+ echo "${dir}"
+}
+
+# Detect the project root. In CI builds, we assume the script is in
+# the git tree and traverse from there, otherwise, traverse from `pwd`
+# to find `.git` directory.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ PROGRAM_PATH="$(realpath "$0")"
+ PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")"
+ PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")"
+else
+ PROJECT_ROOT="$(repo_root $(pwd))"
+fi
+
+log_yellow "Changing to the project root: ${PROJECT_ROOT}."
+cd "${PROJECT_ROOT}"
+
+# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need
+# to use this environment variable in `PROJECT_ROOT`.
+if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then
+
+ mkdir -p "${tmpdir}/gcloud"
+ gcloud_config_dir="${tmpdir}/gcloud"
+
+ log_yellow "Using isolated gcloud config: ${gcloud_config_dir}."
+ export CLOUDSDK_CONFIG="${gcloud_config_dir}"
+
+ log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication."
+ gcloud auth activate-service-account \
+ --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}"
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+fi
+
+required_envvars=(
+ # The basic trampoline configurations.
+ "TRAMPOLINE_IMAGE"
+ "TRAMPOLINE_BUILD_FILE"
+)
+
+if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then
+ source "${PROJECT_ROOT}/.trampolinerc"
+fi
+
+log_yellow "Checking environment variables."
+for e in "${required_envvars[@]}"
+do
+ if [[ -z "${!e:-}" ]]; then
+ log "Missing ${e} env var. Aborting."
+ exit 1
+ fi
+done
+
+# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1
+# script: e.g. "github/repo-name/.kokoro/run_tests.sh"
+TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}"
+log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}"
+
+# ignore error on docker operations and test execution
+set +e
+
+log_yellow "Preparing Docker image."
+# We only download the docker image in CI builds.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ # Download the docker image specified by `TRAMPOLINE_IMAGE`
+
+ # We may want to add --max-concurrent-downloads flag.
+
+ log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ if docker pull "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="true"
+ else
+ log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="false"
+ fi
+else
+ # For local run, check if we have the image.
+ if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then
+ has_image="true"
+ else
+ has_image="false"
+ fi
+fi
+
+
+# The default user for a Docker container has uid 0 (root). To avoid
+# creating root-owned files in the build directory we tell docker to
+# use the current user ID.
+user_uid="$(id -u)"
+user_gid="$(id -g)"
+user_name="$(id -un)"
+
+# To allow docker in docker, we add the user to the docker group in
+# the host os.
+docker_gid=$(cut -d: -f3 < <(getent group docker))
+
+update_cache="false"
+if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then
+ # Build the Docker image from the source.
+ context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}")
+ docker_build_flags=(
+ "-f" "${TRAMPOLINE_DOCKERFILE}"
+ "-t" "${TRAMPOLINE_IMAGE}"
+ "--build-arg" "UID=${user_uid}"
+ "--build-arg" "USERNAME=${user_name}"
+ )
+ if [[ "${has_image}" == "true" ]]; then
+ docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}")
+ fi
+
+ log_yellow "Start building the docker image."
+ if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then
+ echo "docker build" "${docker_build_flags[@]}" "${context_dir}"
+ fi
+
+ # ON CI systems, we want to suppress docker build logs, only
+ # output the logs when it fails.
+ if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ if docker build "${docker_build_flags[@]}" "${context_dir}" \
+ > "${tmpdir}/docker_build.log" 2>&1; then
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ cat "${tmpdir}/docker_build.log"
+ fi
+
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ log_yellow "Dumping the build logs:"
+ cat "${tmpdir}/docker_build.log"
+ exit 1
+ fi
+ else
+ if docker build "${docker_build_flags[@]}" "${context_dir}"; then
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ exit 1
+ fi
+ fi
+else
+ if [[ "${has_image}" != "true" ]]; then
+ log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting."
+ exit 1
+ fi
+fi
+
+# We use an array for the flags so they are easier to document.
+docker_flags=(
+ # Remove the container after it exits.
+ "--rm"
+
+ # Use the host network.
+ "--network=host"
+
+ # Run in privileged mode. We are not using docker for sandboxing or
+ # isolation, just for packaging our dev tools.
+ "--privileged"
+
+ # Run the docker script with the user id. Because the docker image gets to
+ # write in ${PWD} you typically want this to be your user id.
+ # To allow docker in docker, we need to use docker gid on the host.
+ "--user" "${user_uid}:${docker_gid}"
+
+ # Pass down the USER.
+ "--env" "USER=${user_name}"
+
+ # Mount the project directory inside the Docker container.
+ "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}"
+ "--workdir" "${TRAMPOLINE_WORKSPACE}"
+ "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}"
+
+ # Mount the temporary home directory.
+ "--volume" "${tmphome}:/h"
+ "--env" "HOME=/h"
+
+ # Allow docker in docker.
+ "--volume" "/var/run/docker.sock:/var/run/docker.sock"
+
+ # Mount the /tmp so that docker in docker can mount the files
+ # there correctly.
+ "--volume" "/tmp:/tmp"
+ # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR
+ # TODO(tmatsuo): This part is not portable.
+ "--env" "TRAMPOLINE_SECRET_DIR=/secrets"
+ "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile"
+ "--env" "KOKORO_GFILE_DIR=/secrets/gfile"
+ "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore"
+ "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore"
+)
+
+# Add an option for nicer output if the build gets a tty.
+if [[ -t 0 ]]; then
+ docker_flags+=("-it")
+fi
+
+# Passing down env vars
+for e in "${pass_down_envvars[@]}"
+do
+ if [[ -n "${!e:-}" ]]; then
+ docker_flags+=("--env" "${e}=${!e}")
+ fi
+done
+
+# If arguments are given, all arguments will become the commands run
+# in the container, otherwise run TRAMPOLINE_BUILD_FILE.
+if [[ $# -ge 1 ]]; then
+ log_yellow "Running the given commands '" "${@:1}" "' in the container."
+ readonly commands=("${@:1}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+else
+ log_yellow "Running the tests in a Docker container."
+ docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+fi
+
+
+test_retval=$?
+
+if [[ ${test_retval} -eq 0 ]]; then
+ log_green "Build finished with ${test_retval}"
+else
+ log_red "Build finished with ${test_retval}"
+fi
+
+# Only upload it when the test passes.
+if [[ "${update_cache}" == "true" ]] && \
+ [[ $test_retval == 0 ]] && \
+ [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then
+ log_yellow "Uploading the Docker image."
+ if docker push "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished uploading the Docker image."
+ else
+ log_red "Failed uploading the Docker image."
+ fi
+ # Call trampoline_after_upload_hook if it's defined.
+ if function_exists trampoline_after_upload_hook; then
+ trampoline_after_upload_hook
+ fi
+
+fi
+
+exit "${test_retval}"
diff --git a/.trampolinerc b/.trampolinerc
new file mode 100644
index 0000000..995ee29
--- /dev/null
+++ b/.trampolinerc
@@ -0,0 +1,51 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Template for .trampolinerc
+
+# Add required env vars here.
+required_envvars+=(
+ "STAGING_BUCKET"
+ "V2_STAGING_BUCKET"
+)
+
+# Add env vars which are passed down into the container here.
+pass_down_envvars+=(
+ "STAGING_BUCKET"
+ "V2_STAGING_BUCKET"
+)
+
+# Prevent unintentional override on the default image.
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \
+ [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image."
+ exit 1
+fi
+
+# Define the default value if it makes sense.
+if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then
+ TRAMPOLINE_IMAGE_UPLOAD=""
+fi
+
+if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ TRAMPOLINE_IMAGE=""
+fi
+
+if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then
+ TRAMPOLINE_DOCKERFILE=""
+fi
+
+if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then
+ TRAMPOLINE_BUILD_FILE=""
+fi
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index b3d1f60..039f436 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,44 +1,95 @@
-# Contributor Code of Conduct
+# Code of Conduct
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
+## Our Pledge
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 8c7a98a..798e1f5 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -80,25 +80,6 @@ We use `nox <https://pypi.org/project/nox/>`__ to instrument our tests.
.. nox: https://pypi.org/project/nox/
-Note on Editable Installs / Develop Mode
-========================================
-
-- As mentioned previously, using ``setuptools`` in `develop mode`_
- or a ``pip`` `editable install`_ is not possible with this
- library. This is because this library uses `namespace packages`_.
- For context see `Issue #2316`_ and the relevant `PyPA issue`_.
-
- Since ``editable`` / ``develop`` mode can't be used, packages
- need to be installed directly. Hence your changes to the source
- tree don't get incorporated into the **already installed**
- package.
-
-.. _namespace packages: https://www.python.org/dev/peps/pep-0420/
-.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316
-.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12
-.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode
-.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
-
*****************************************
I'm getting weird errors... Can you help?
*****************************************
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
index 228529e..6316a53 100644
--- a/docs/_templates/layout.html
+++ b/docs/_templates/layout.html
@@ -21,8 +21,8 @@
- On January 1, 2020 this library will no longer support Python 2 on the latest released version.
- Previously released library versions will continue to be available. For more information please
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
+ Library versions released prior to that date will continue to be available. For more information please
visit
Python 2 support on Google Cloud.
{% block body %} {% endblock %}
diff --git a/docs/conf.py b/docs/conf.py
index 3fa863c..7940715 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,12 +20,16 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
+# For plugins that can not read conf.py.
+# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
+sys.path.insert(0, os.path.abspath("."))
+
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.6.3"
+needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -35,6 +39,7 @@
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
+ "sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
@@ -90,7 +95,12 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ["_build"]
+exclude_patterns = [
+ "_build",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "samples/snippets/README.rst",
+]
# The reST default role (used for this markup: `text`) to use for all
# documents.
@@ -337,8 +347,9 @@
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
- "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
+ "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
}
diff --git a/docs/osconfig_v1/types.rst b/docs/osconfig_v1/types.rst
index 3121f99..2fdbc30 100644
--- a/docs/osconfig_v1/types.rst
+++ b/docs/osconfig_v1/types.rst
@@ -3,3 +3,4 @@ Types for Google Cloud Osconfig v1 API
.. automodule:: google.cloud.osconfig_v1.types
:members:
+ :show-inheritance:
diff --git a/google/cloud/osconfig/__init__.py b/google/cloud/osconfig/__init__.py
index 85461f3..2708c1d 100644
--- a/google/cloud/osconfig/__init__.py
+++ b/google/cloud/osconfig/__init__.py
@@ -21,6 +21,8 @@
from google.cloud.osconfig_v1.services.os_config_service.client import (
OsConfigServiceClient,
)
+from google.cloud.osconfig_v1.types.inventory import Inventory
+from google.cloud.osconfig_v1.types.osconfig_common import FixedOrPercent
from google.cloud.osconfig_v1.types.patch_deployments import (
CreatePatchDeploymentRequest,
)
@@ -57,6 +59,7 @@
from google.cloud.osconfig_v1.types.patch_jobs import PatchInstanceFilter
from google.cloud.osconfig_v1.types.patch_jobs import PatchJob
from google.cloud.osconfig_v1.types.patch_jobs import PatchJobInstanceDetails
+from google.cloud.osconfig_v1.types.patch_jobs import PatchRollout
from google.cloud.osconfig_v1.types.patch_jobs import WindowsUpdateSettings
from google.cloud.osconfig_v1.types.patch_jobs import YumSettings
from google.cloud.osconfig_v1.types.patch_jobs import ZypperSettings
@@ -69,11 +72,13 @@
"ExecStep",
"ExecStepConfig",
"ExecutePatchJobRequest",
+ "FixedOrPercent",
"GcsObject",
"GetPatchDeploymentRequest",
"GetPatchJobRequest",
"GooSettings",
"Instance",
+ "Inventory",
"ListPatchDeploymentsRequest",
"ListPatchDeploymentsResponse",
"ListPatchJobInstanceDetailsRequest",
@@ -89,6 +94,7 @@
"PatchInstanceFilter",
"PatchJob",
"PatchJobInstanceDetails",
+ "PatchRollout",
"RecurringSchedule",
"WeekDayOfMonth",
"WeeklySchedule",
diff --git a/google/cloud/osconfig_v1/__init__.py b/google/cloud/osconfig_v1/__init__.py
index 723191a..3153077 100644
--- a/google/cloud/osconfig_v1/__init__.py
+++ b/google/cloud/osconfig_v1/__init__.py
@@ -16,6 +16,8 @@
#
from .services.os_config_service import OsConfigServiceClient
+from .types.inventory import Inventory
+from .types.osconfig_common import FixedOrPercent
from .types.patch_deployments import CreatePatchDeploymentRequest
from .types.patch_deployments import DeletePatchDeploymentRequest
from .types.patch_deployments import GetPatchDeploymentRequest
@@ -44,6 +46,7 @@
from .types.patch_jobs import PatchInstanceFilter
from .types.patch_jobs import PatchJob
from .types.patch_jobs import PatchJobInstanceDetails
+from .types.patch_jobs import PatchRollout
from .types.patch_jobs import WindowsUpdateSettings
from .types.patch_jobs import YumSettings
from .types.patch_jobs import ZypperSettings
@@ -57,11 +60,13 @@
"ExecStep",
"ExecStepConfig",
"ExecutePatchJobRequest",
+ "FixedOrPercent",
"GcsObject",
"GetPatchDeploymentRequest",
"GetPatchJobRequest",
"GooSettings",
"Instance",
+ "Inventory",
"ListPatchDeploymentsRequest",
"ListPatchDeploymentsResponse",
"ListPatchJobInstanceDetailsRequest",
@@ -75,6 +80,7 @@
"PatchInstanceFilter",
"PatchJob",
"PatchJobInstanceDetails",
+ "PatchRollout",
"RecurringSchedule",
"WeekDayOfMonth",
"WeeklySchedule",
diff --git a/google/cloud/osconfig_v1/services/os_config_service/__init__.py b/google/cloud/osconfig_v1/services/os_config_service/__init__.py
index 5f9e562..0d1727b 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/__init__.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/__init__.py
@@ -18,4 +18,7 @@
from .client import OsConfigServiceClient
from .async_client import OsConfigServiceAsyncClient
-__all__ = ("OsConfigServiceClient", "OsConfigServiceAsyncClient")
+__all__ = (
+ "OsConfigServiceClient",
+ "OsConfigServiceAsyncClient",
+)
diff --git a/google/cloud/osconfig_v1/services/os_config_service/async_client.py b/google/cloud/osconfig_v1/services/os_config_service/async_client.py
index b06f297..0c4c1e6 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/async_client.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/async_client.py
@@ -34,7 +34,7 @@
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-from .transports.base import OsConfigServiceTransport
+from .transports.base import OsConfigServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import OsConfigServiceGrpcAsyncIOTransport
from .client import OsConfigServiceClient
@@ -51,11 +51,56 @@ class OsConfigServiceAsyncClient:
DEFAULT_ENDPOINT = OsConfigServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = OsConfigServiceClient.DEFAULT_MTLS_ENDPOINT
+ instance_path = staticmethod(OsConfigServiceClient.instance_path)
+ parse_instance_path = staticmethod(OsConfigServiceClient.parse_instance_path)
patch_deployment_path = staticmethod(OsConfigServiceClient.patch_deployment_path)
+ parse_patch_deployment_path = staticmethod(
+ OsConfigServiceClient.parse_patch_deployment_path
+ )
+ patch_job_path = staticmethod(OsConfigServiceClient.patch_job_path)
+ parse_patch_job_path = staticmethod(OsConfigServiceClient.parse_patch_job_path)
+
+ common_billing_account_path = staticmethod(
+ OsConfigServiceClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ OsConfigServiceClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(OsConfigServiceClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ OsConfigServiceClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ OsConfigServiceClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ OsConfigServiceClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(OsConfigServiceClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ OsConfigServiceClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(OsConfigServiceClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ OsConfigServiceClient.parse_common_location_path
+ )
from_service_account_file = OsConfigServiceClient.from_service_account_file
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> OsConfigServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ OsConfigServiceTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
get_transport_class = functools.partial(
type(OsConfigServiceClient).get_transport_class, type(OsConfigServiceClient)
)
@@ -66,6 +111,7 @@ def __init__(
credentials: credentials.Credentials = None,
transport: Union[str, OsConfigServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the os config service client.
@@ -81,16 +127,19 @@ def __init__(
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint, this is the default value for
- the environment variable) and "auto" (auto switch to the default
- mTLS endpoint if client SSL credentials is present). However,
- the ``api_endpoint`` property takes precedence if provided.
- (2) The ``client_cert_source`` property is used to provide client
- SSL credentials for mutual TLS transport. If not provided, the
- default SSL credentials will be used if present.
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
@@ -98,7 +147,10 @@ def __init__(
"""
self._client = OsConfigServiceClient(
- credentials=credentials, transport=transport, client_options=client_options
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
)
async def execute_patch_job(
@@ -128,7 +180,7 @@ async def execute_patch_job(
A high level representation of a patch job that is
either in progress or has completed.
- Instances details are not included in the job. To
+ Instance details are not included in the job. To
paginate through instance details, use
ListPatchJobInstanceDetails.
@@ -146,7 +198,7 @@ async def execute_patch_job(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.execute_patch_job,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -156,7 +208,7 @@ async def execute_patch_job(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -196,7 +248,7 @@ async def get_patch_job(
A high level representation of a patch job that is
either in progress or has completed.
- Instances details are not included in the job. To
+ Instance details are not included in the job. To
paginate through instance details, use
ListPatchJobInstanceDetails.
@@ -208,7 +260,8 @@ async def get_patch_job(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -227,7 +280,7 @@ async def get_patch_job(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_patch_job,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -237,7 +290,7 @@ async def get_patch_job(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -268,7 +321,7 @@ async def cancel_patch_job(
A high level representation of a patch job that is
either in progress or has completed.
- Instances details are not included in the job. To
+ Instance details are not included in the job. To
paginate through instance details, use
ListPatchJobInstanceDetails.
@@ -286,7 +339,7 @@ async def cancel_patch_job(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.cancel_patch_job,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -296,7 +349,7 @@ async def cancel_patch_job(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -340,7 +393,8 @@ async def list_patch_jobs(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -359,7 +413,7 @@ async def list_patch_jobs(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_patch_jobs,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -369,12 +423,12 @@ async def list_patch_jobs(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPatchJobsAsyncPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -420,7 +474,8 @@ async def list_patch_job_instance_details(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -439,7 +494,7 @@ async def list_patch_job_instance_details(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_patch_job_instance_details,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -449,12 +504,12 @@ async def list_patch_job_instance_details(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPatchJobInstanceDetailsAsyncPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -523,7 +578,8 @@ async def create_patch_deployment(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, patch_deployment, patch_deployment_id]):
+ has_flattened_params = any([parent, patch_deployment, patch_deployment_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -546,7 +602,7 @@ async def create_patch_deployment(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_patch_deployment,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -556,7 +612,7 @@ async def create_patch_deployment(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -602,7 +658,8 @@ async def get_patch_deployment(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -621,7 +678,7 @@ async def get_patch_deployment(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_patch_deployment,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -631,7 +688,7 @@ async def get_patch_deployment(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -676,7 +733,8 @@ async def list_patch_deployments(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -695,7 +753,7 @@ async def list_patch_deployments(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_patch_deployments,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -705,12 +763,12 @@ async def list_patch_deployments(
)
# Send the request.
- response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListPatchDeploymentsAsyncPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -747,7 +805,8 @@ async def delete_patch_deployment(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
@@ -766,7 +825,7 @@ async def delete_patch_deployment(
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_patch_deployment,
default_timeout=None,
- client_info=_client_info,
+ client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
@@ -776,15 +835,17 @@ async def delete_patch_deployment(
)
# Send the request.
- await rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
try:
- _client_info = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution("google-cloud-os-config").version
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-os-config",).version,
)
except pkg_resources.DistributionNotFound:
- _client_info = gapic_v1.client_info.ClientInfo()
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("OsConfigServiceAsyncClient",)
diff --git a/google/cloud/osconfig_v1/services/os_config_service/client.py b/google/cloud/osconfig_v1/services/os_config_service/client.py
index 9e0ba0e..d74daa2 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/client.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/client.py
@@ -16,17 +16,19 @@
#
from collections import OrderedDict
+from distutils import util
import os
import re
-from typing import Callable, Dict, Sequence, Tuple, Type, Union
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
@@ -36,7 +38,7 @@
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-from .transports.base import OsConfigServiceTransport
+from .transports.base import OsConfigServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import OsConfigServiceGrpcTransport
from .transports.grpc_asyncio import OsConfigServiceGrpcAsyncIOTransport
@@ -55,7 +57,7 @@ class OsConfigServiceClientMeta(type):
_transport_registry["grpc"] = OsConfigServiceGrpcTransport
_transport_registry["grpc_asyncio"] = OsConfigServiceGrpcAsyncIOTransport
- def get_transport_class(cls, label: str = None) -> Type[OsConfigServiceTransport]:
+ def get_transport_class(cls, label: str = None,) -> Type[OsConfigServiceTransport]:
"""Return an appropriate transport class.
Args:
@@ -135,11 +137,36 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> OsConfigServiceTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ OsConfigServiceTransport: The transport used by the client instance.
+ """
+ return self._transport
+
@staticmethod
- def patch_deployment_path(project: str, patch_deployment: str) -> str:
+ def instance_path(project: str, zone: str, instance: str,) -> str:
+ """Return a fully-qualified instance string."""
+ return "projects/{project}/zones/{zone}/instances/{instance}".format(
+ project=project, zone=zone, instance=instance,
+ )
+
+ @staticmethod
+ def parse_instance_path(path: str) -> Dict[str, str]:
+ """Parse a instance path into its component segments."""
+ m = re.match(
+            r"^projects/(?P<project>.+?)/zones/(?P<zone>.+?)/instances/(?P<instance>.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def patch_deployment_path(project: str, patch_deployment: str,) -> str:
"""Return a fully-qualified patch_deployment string."""
return "projects/{project}/patchDeployments/{patch_deployment}".format(
- project=project, patch_deployment=patch_deployment
+ project=project, patch_deployment=patch_deployment,
)
@staticmethod
@@ -151,12 +178,85 @@ def parse_patch_deployment_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def patch_job_path(project: str, patch_job: str,) -> str:
+ """Return a fully-qualified patch_job string."""
+ return "projects/{project}/patchJobs/{patch_job}".format(
+ project=project, patch_job=patch_job,
+ )
+
+ @staticmethod
+ def parse_patch_job_path(path: str) -> Dict[str, str]:
+ """Parse a patch_job path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/patchJobs/(?P<patch_job>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
- transport: Union[str, OsConfigServiceTransport] = None,
- client_options: ClientOptions = None,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, OsConfigServiceTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the os config service client.
@@ -169,48 +269,74 @@ def __init__(
transport (Union[str, ~.OsConfigServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (ClientOptions): Custom options for the client. It
- won't take effect if a ``transport`` instance is provided.
+ client_options (client_options_lib.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client. GOOGLE_API_USE_MTLS
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
- use the default regular endpoint, this is the default value for
- the environment variable) and "auto" (auto switch to the default
- mTLS endpoint if client SSL credentials is present). However,
- the ``api_endpoint`` property takes precedence if provided.
- (2) The ``client_cert_source`` property is used to provide client
- SSL credentials for mutual TLS transport. If not provided, the
- default SSL credentials will be used if present.
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
- client_options = ClientOptions.from_dict(client_options)
+ client_options = client_options_lib.from_dict(client_options)
if client_options is None:
- client_options = ClientOptions.ClientOptions()
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ ssl_credentials = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ import grpc # type: ignore
- if client_options.api_endpoint is None:
- use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS", "never")
+ cert, key = client_options.client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ is_mtls = True
+ else:
+ creds = SslCredentials()
+ is_mtls = creds.is_mtls
+ ssl_credentials = creds.ssl_credentials if is_mtls else None
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
- client_options.api_endpoint = self.DEFAULT_ENDPOINT
+ api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
- client_options.api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
- has_client_cert_source = (
- client_options.client_cert_source is not None
- or mtls.has_default_client_cert_source()
- )
- client_options.api_endpoint = (
- self.DEFAULT_MTLS_ENDPOINT
- if has_client_cert_source
- else self.DEFAULT_ENDPOINT
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
- "Unsupported GOOGLE_API_USE_MTLS value. Accepted values: never, auto, always"
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
@@ -218,19 +344,27 @@ def __init__(
# instance provides an extensibility point for unusual situations.
if isinstance(transport, OsConfigServiceTransport):
# transport is a OsConfigServiceTransport instance.
- if credentials:
+ if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
- host=client_options.api_endpoint,
- api_mtls_endpoint=client_options.api_endpoint,
- client_cert_source=client_options.client_cert_source,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ ssl_channel_credentials=ssl_credentials,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
)
def execute_patch_job(
@@ -260,7 +394,7 @@ def execute_patch_job(
A high level representation of a patch job that is
either in progress or has completed.
- Instances details are not included in the job. To
+ Instance details are not included in the job. To
paginate through instance details, use
ListPatchJobInstanceDetails.
@@ -271,15 +405,16 @@ def execute_patch_job(
"""
# Create or coerce a protobuf request object.
- request = patch_jobs.ExecutePatchJobRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_jobs.ExecutePatchJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_jobs.ExecutePatchJobRequest):
+ request = patch_jobs.ExecutePatchJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.execute_patch_job,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.execute_patch_job]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -288,7 +423,7 @@ def execute_patch_job(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -328,7 +463,7 @@ def get_patch_job(
A high level representation of a patch job that is
either in progress or has completed.
- Instances details are not included in the job. To
+ Instance details are not included in the job. To
paginate through instance details, use
ListPatchJobInstanceDetails.
@@ -340,27 +475,29 @@ def get_patch_job(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_jobs.GetPatchJobRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_jobs.GetPatchJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_jobs.GetPatchJobRequest):
+ request = patch_jobs.GetPatchJobRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if name is not None:
- request.name = name
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.get_patch_job,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.get_patch_job]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -369,7 +506,7 @@ def get_patch_job(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -400,7 +537,7 @@ def cancel_patch_job(
A high level representation of a patch job that is
either in progress or has completed.
- Instances details are not included in the job. To
+ Instance details are not included in the job. To
paginate through instance details, use
ListPatchJobInstanceDetails.
@@ -411,15 +548,16 @@ def cancel_patch_job(
"""
# Create or coerce a protobuf request object.
- request = patch_jobs.CancelPatchJobRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_jobs.CancelPatchJobRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_jobs.CancelPatchJobRequest):
+ request = patch_jobs.CancelPatchJobRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.cancel_patch_job,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.cancel_patch_job]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -428,7 +566,7 @@ def cancel_patch_job(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -472,27 +610,29 @@ def list_patch_jobs(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_jobs.ListPatchJobsRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_jobs.ListPatchJobsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_jobs.ListPatchJobsRequest):
+ request = patch_jobs.ListPatchJobsRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if parent is not None:
- request.parent = parent
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.list_patch_jobs,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.list_patch_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -501,12 +641,12 @@ def list_patch_jobs(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPatchJobsPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -552,27 +692,31 @@ def list_patch_job_instance_details(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_jobs.ListPatchJobInstanceDetailsRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_jobs.ListPatchJobInstanceDetailsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_jobs.ListPatchJobInstanceDetailsRequest):
+ request = patch_jobs.ListPatchJobInstanceDetailsRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if parent is not None:
- request.parent = parent
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.list_patch_job_instance_details,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[
+ self._transport.list_patch_job_instance_details
+ ]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -581,12 +725,12 @@ def list_patch_job_instance_details(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPatchJobInstanceDetailsPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -655,31 +799,33 @@ def create_patch_deployment(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, patch_deployment, patch_deployment_id]):
+ has_flattened_params = any([parent, patch_deployment, patch_deployment_id])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_deployments.CreatePatchDeploymentRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_deployments.CreatePatchDeploymentRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_deployments.CreatePatchDeploymentRequest):
+ request = patch_deployments.CreatePatchDeploymentRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if parent is not None:
- request.parent = parent
- if patch_deployment is not None:
- request.patch_deployment = patch_deployment
- if patch_deployment_id is not None:
- request.patch_deployment_id = patch_deployment_id
+ if parent is not None:
+ request.parent = parent
+ if patch_deployment is not None:
+ request.patch_deployment = patch_deployment
+ if patch_deployment_id is not None:
+ request.patch_deployment_id = patch_deployment_id
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.create_patch_deployment,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.create_patch_deployment]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -688,7 +834,7 @@ def create_patch_deployment(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -734,27 +880,29 @@ def get_patch_deployment(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_deployments.GetPatchDeploymentRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_deployments.GetPatchDeploymentRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_deployments.GetPatchDeploymentRequest):
+ request = patch_deployments.GetPatchDeploymentRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if name is not None:
- request.name = name
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.get_patch_deployment,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.get_patch_deployment]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -763,7 +911,7 @@ def get_patch_deployment(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -808,27 +956,29 @@ def list_patch_deployments(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_deployments.ListPatchDeploymentsRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_deployments.ListPatchDeploymentsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_deployments.ListPatchDeploymentsRequest):
+ request = patch_deployments.ListPatchDeploymentsRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if parent is not None:
- request.parent = parent
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.list_patch_deployments,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.list_patch_deployments]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -837,12 +987,12 @@ def list_patch_deployments(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListPatchDeploymentsPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -879,27 +1029,29 @@ def delete_patch_deployment(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = patch_deployments.DeletePatchDeploymentRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a patch_deployments.DeletePatchDeploymentRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, patch_deployments.DeletePatchDeploymentRequest):
+ request = patch_deployments.DeletePatchDeploymentRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if name is not None:
- request.name = name
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.delete_patch_deployment,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.delete_patch_deployment]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -908,15 +1060,17 @@ def delete_patch_deployment(
)
# Send the request.
- rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
try:
- _client_info = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution("google-cloud-os-config").version
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-os-config",).version,
)
except pkg_resources.DistributionNotFound:
- _client_info = gapic_v1.client_info.ClientInfo()
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("OsConfigServiceClient",)
diff --git a/google/cloud/osconfig_v1/services/os_config_service/pagers.py b/google/cloud/osconfig_v1/services/os_config_service/pagers.py
index 83ef8f6..f9593f5 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/pagers.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/pagers.py
@@ -15,7 +15,7 @@
# limitations under the License.
#
-from typing import Any, AsyncIterable, Awaitable, Callable, Iterable
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
from google.cloud.osconfig_v1.types import patch_deployments
from google.cloud.osconfig_v1.types import patch_jobs
@@ -41,11 +41,11 @@ class ListPatchJobsPager:
def __init__(
self,
- method: Callable[
- [patch_jobs.ListPatchJobsRequest], patch_jobs.ListPatchJobsResponse
- ],
+ method: Callable[..., patch_jobs.ListPatchJobsResponse],
request: patch_jobs.ListPatchJobsRequest,
response: patch_jobs.ListPatchJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -56,10 +56,13 @@ def __init__(
The initial request object.
response (:class:`~.patch_jobs.ListPatchJobsResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = patch_jobs.ListPatchJobsRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@@ -69,7 +72,7 @@ def pages(self) -> Iterable[patch_jobs.ListPatchJobsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request)
+ self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[patch_jobs.PatchJob]:
@@ -100,12 +103,11 @@ class ListPatchJobsAsyncPager:
def __init__(
self,
- method: Callable[
- [patch_jobs.ListPatchJobsRequest],
- Awaitable[patch_jobs.ListPatchJobsResponse],
- ],
+ method: Callable[..., Awaitable[patch_jobs.ListPatchJobsResponse]],
request: patch_jobs.ListPatchJobsRequest,
response: patch_jobs.ListPatchJobsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -116,10 +118,13 @@ def __init__(
The initial request object.
response (:class:`~.patch_jobs.ListPatchJobsResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = patch_jobs.ListPatchJobsRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@@ -129,7 +134,7 @@ async def pages(self) -> AsyncIterable[patch_jobs.ListPatchJobsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request)
+ self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[patch_jobs.PatchJob]:
@@ -164,12 +169,11 @@ class ListPatchJobInstanceDetailsPager:
def __init__(
self,
- method: Callable[
- [patch_jobs.ListPatchJobInstanceDetailsRequest],
- patch_jobs.ListPatchJobInstanceDetailsResponse,
- ],
+ method: Callable[..., patch_jobs.ListPatchJobInstanceDetailsResponse],
request: patch_jobs.ListPatchJobInstanceDetailsRequest,
response: patch_jobs.ListPatchJobInstanceDetailsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -180,10 +184,13 @@ def __init__(
The initial request object.
response (:class:`~.patch_jobs.ListPatchJobInstanceDetailsResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = patch_jobs.ListPatchJobInstanceDetailsRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@@ -193,7 +200,7 @@ def pages(self) -> Iterable[patch_jobs.ListPatchJobInstanceDetailsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request)
+ self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[patch_jobs.PatchJobInstanceDetails]:
@@ -225,11 +232,12 @@ class ListPatchJobInstanceDetailsAsyncPager:
def __init__(
self,
method: Callable[
- [patch_jobs.ListPatchJobInstanceDetailsRequest],
- Awaitable[patch_jobs.ListPatchJobInstanceDetailsResponse],
+ ..., Awaitable[patch_jobs.ListPatchJobInstanceDetailsResponse]
],
request: patch_jobs.ListPatchJobInstanceDetailsRequest,
response: patch_jobs.ListPatchJobInstanceDetailsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -240,22 +248,25 @@ def __init__(
The initial request object.
response (:class:`~.patch_jobs.ListPatchJobInstanceDetailsResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = patch_jobs.ListPatchJobInstanceDetailsRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
- self
+ self,
) -> AsyncIterable[patch_jobs.ListPatchJobInstanceDetailsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request)
+ self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[patch_jobs.PatchJobInstanceDetails]:
@@ -290,12 +301,11 @@ class ListPatchDeploymentsPager:
def __init__(
self,
- method: Callable[
- [patch_deployments.ListPatchDeploymentsRequest],
- patch_deployments.ListPatchDeploymentsResponse,
- ],
+ method: Callable[..., patch_deployments.ListPatchDeploymentsResponse],
request: patch_deployments.ListPatchDeploymentsRequest,
response: patch_deployments.ListPatchDeploymentsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -306,10 +316,13 @@ def __init__(
The initial request object.
response (:class:`~.patch_deployments.ListPatchDeploymentsResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = patch_deployments.ListPatchDeploymentsRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@@ -319,7 +332,7 @@ def pages(self) -> Iterable[patch_deployments.ListPatchDeploymentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request)
+ self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[patch_deployments.PatchDeployment]:
@@ -351,11 +364,12 @@ class ListPatchDeploymentsAsyncPager:
def __init__(
self,
method: Callable[
- [patch_deployments.ListPatchDeploymentsRequest],
- Awaitable[patch_deployments.ListPatchDeploymentsResponse],
+ ..., Awaitable[patch_deployments.ListPatchDeploymentsResponse]
],
request: patch_deployments.ListPatchDeploymentsRequest,
response: patch_deployments.ListPatchDeploymentsResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -366,22 +380,25 @@ def __init__(
The initial request object.
response (:class:`~.patch_deployments.ListPatchDeploymentsResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = patch_deployments.ListPatchDeploymentsRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(
- self
+ self,
) -> AsyncIterable[patch_deployments.ListPatchDeploymentsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = await self._method(self._request)
+ self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[patch_deployments.PatchDeployment]:
diff --git a/google/cloud/osconfig_v1/services/os_config_service/transports/__init__.py b/google/cloud/osconfig_v1/services/os_config_service/transports/__init__.py
index 242dbcd..c4974e1 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/transports/__init__.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/transports/__init__.py
@@ -28,7 +28,6 @@
_transport_registry["grpc"] = OsConfigServiceGrpcTransport
_transport_registry["grpc_asyncio"] = OsConfigServiceGrpcAsyncIOTransport
-
__all__ = (
"OsConfigServiceTransport",
"OsConfigServiceGrpcTransport",
diff --git a/google/cloud/osconfig_v1/services/os_config_service/transports/base.py b/google/cloud/osconfig_v1/services/os_config_service/transports/base.py
index 190f55f..5ef50cc 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/transports/base.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/transports/base.py
@@ -17,8 +17,12 @@
import abc
import typing
+import pkg_resources
-from google import auth
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.osconfig_v1.types import patch_deployments
@@ -26,6 +30,14 @@
from google.protobuf import empty_pb2 as empty # type: ignore
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-os-config",).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
class OsConfigServiceTransport(abc.ABC):
"""Abstract transport class for OsConfigService."""
@@ -36,6 +48,10 @@ def __init__(
*,
host: str = "osconfig.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
**kwargs,
) -> None:
"""Instantiate the transport.
@@ -47,6 +63,17 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
@@ -55,15 +82,72 @@ def __init__(
# If no credentials are provided, then determine the appropriate
# defaults.
- if credentials is None:
- credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
# Save the credentials.
self._credentials = credentials
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.execute_patch_job: gapic_v1.method.wrap_method(
+ self.execute_patch_job, default_timeout=None, client_info=client_info,
+ ),
+ self.get_patch_job: gapic_v1.method.wrap_method(
+ self.get_patch_job, default_timeout=None, client_info=client_info,
+ ),
+ self.cancel_patch_job: gapic_v1.method.wrap_method(
+ self.cancel_patch_job, default_timeout=None, client_info=client_info,
+ ),
+ self.list_patch_jobs: gapic_v1.method.wrap_method(
+ self.list_patch_jobs, default_timeout=None, client_info=client_info,
+ ),
+ self.list_patch_job_instance_details: gapic_v1.method.wrap_method(
+ self.list_patch_job_instance_details,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.create_patch_deployment: gapic_v1.method.wrap_method(
+ self.create_patch_deployment,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.get_patch_deployment: gapic_v1.method.wrap_method(
+ self.get_patch_deployment,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.list_patch_deployments: gapic_v1.method.wrap_method(
+ self.list_patch_deployments,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ self.delete_patch_deployment: gapic_v1.method.wrap_method(
+ self.delete_patch_deployment,
+ default_timeout=None,
+ client_info=client_info,
+ ),
+ }
+
@property
def execute_patch_job(
- self
+ self,
) -> typing.Callable[
[patch_jobs.ExecutePatchJobRequest],
typing.Union[patch_jobs.PatchJob, typing.Awaitable[patch_jobs.PatchJob]],
@@ -72,7 +156,7 @@ def execute_patch_job(
@property
def get_patch_job(
- self
+ self,
) -> typing.Callable[
[patch_jobs.GetPatchJobRequest],
typing.Union[patch_jobs.PatchJob, typing.Awaitable[patch_jobs.PatchJob]],
@@ -81,7 +165,7 @@ def get_patch_job(
@property
def cancel_patch_job(
- self
+ self,
) -> typing.Callable[
[patch_jobs.CancelPatchJobRequest],
typing.Union[patch_jobs.PatchJob, typing.Awaitable[patch_jobs.PatchJob]],
@@ -90,7 +174,7 @@ def cancel_patch_job(
@property
def list_patch_jobs(
- self
+ self,
) -> typing.Callable[
[patch_jobs.ListPatchJobsRequest],
typing.Union[
@@ -102,7 +186,7 @@ def list_patch_jobs(
@property
def list_patch_job_instance_details(
- self
+ self,
) -> typing.Callable[
[patch_jobs.ListPatchJobInstanceDetailsRequest],
typing.Union[
@@ -114,7 +198,7 @@ def list_patch_job_instance_details(
@property
def create_patch_deployment(
- self
+ self,
) -> typing.Callable[
[patch_deployments.CreatePatchDeploymentRequest],
typing.Union[
@@ -126,7 +210,7 @@ def create_patch_deployment(
@property
def get_patch_deployment(
- self
+ self,
) -> typing.Callable[
[patch_deployments.GetPatchDeploymentRequest],
typing.Union[
@@ -138,7 +222,7 @@ def get_patch_deployment(
@property
def list_patch_deployments(
- self
+ self,
) -> typing.Callable[
[patch_deployments.ListPatchDeploymentsRequest],
typing.Union[
@@ -150,7 +234,7 @@ def list_patch_deployments(
@property
def delete_patch_deployment(
- self
+ self,
) -> typing.Callable[
[patch_deployments.DeletePatchDeploymentRequest],
typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
diff --git a/google/cloud/osconfig_v1/services/os_config_service/transports/grpc.py b/google/cloud/osconfig_v1/services/os_config_service/transports/grpc.py
index 69f0de6..6159fb7 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/transports/grpc.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/transports/grpc.py
@@ -15,21 +15,22 @@
# limitations under the License.
#
+import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
+from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
-
import grpc # type: ignore
from google.cloud.osconfig_v1.types import patch_deployments
from google.cloud.osconfig_v1.types import patch_jobs
from google.protobuf import empty_pb2 as empty # type: ignore
-from .base import OsConfigServiceTransport
+from .base import OsConfigServiceTransport, DEFAULT_CLIENT_INFO
class OsConfigServiceGrpcTransport(OsConfigServiceTransport):
@@ -55,9 +56,14 @@ def __init__(
*,
host: str = "osconfig.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
@@ -69,21 +75,39 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
- api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
- provided, it overrides the ``host`` argument and tries to create
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
- callback to provide client SSL certificate bytes and private key
- bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
- is None.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
Raises:
- google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
- creation failed for any reason.
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -91,7 +115,13 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
+ warnings.warn(
+ "api_mtls_endpoint and client_cert_source are deprecated",
+ DeprecationWarning,
+ )
+
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
@@ -99,7 +129,9 @@ def __init__(
)
if credentials is None:
- credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
@@ -115,63 +147,96 @@ def __init__(
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
+ credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
- scopes=self.AUTH_SCOPES,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
)
- # Run the base constructor.
- super().__init__(host=host, credentials=credentials)
self._stubs = {} # type: Dict[str, Callable]
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
@classmethod
def create_channel(
cls,
host: str = "osconfig.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
- **kwargs
+ quota_project_id: Optional[str] = None,
+ **kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
- address (Optionsl[str]): The host for the channel to use.
+ address (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers.create_channel(
- host, credentials=credentials, scopes=scopes, **kwargs
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Sanity check: Only create a new channel if we do not already
- # have one.
- if not hasattr(self, "_grpc_channel"):
- self._grpc_channel = self.create_channel(
- self._host, credentials=self._credentials
- )
-
- # Return the channel from cache.
return self._grpc_channel
@property
def execute_patch_job(
- self
+ self,
) -> Callable[[patch_jobs.ExecutePatchJobRequest], patch_jobs.PatchJob]:
r"""Return a callable for the execute patch job method over gRPC.
@@ -198,7 +263,7 @@ def execute_patch_job(
@property
def get_patch_job(
- self
+ self,
) -> Callable[[patch_jobs.GetPatchJobRequest], patch_jobs.PatchJob]:
r"""Return a callable for the get patch job method over gRPC.
@@ -226,7 +291,7 @@ def get_patch_job(
@property
def cancel_patch_job(
- self
+ self,
) -> Callable[[patch_jobs.CancelPatchJobRequest], patch_jobs.PatchJob]:
r"""Return a callable for the cancel patch job method over gRPC.
@@ -253,7 +318,7 @@ def cancel_patch_job(
@property
def list_patch_jobs(
- self
+ self,
) -> Callable[[patch_jobs.ListPatchJobsRequest], patch_jobs.ListPatchJobsResponse]:
r"""Return a callable for the list patch jobs method over gRPC.
@@ -279,7 +344,7 @@ def list_patch_jobs(
@property
def list_patch_job_instance_details(
- self
+ self,
) -> Callable[
[patch_jobs.ListPatchJobInstanceDetailsRequest],
patch_jobs.ListPatchJobInstanceDetailsResponse,
@@ -311,7 +376,7 @@ def list_patch_job_instance_details(
@property
def create_patch_deployment(
- self
+ self,
) -> Callable[
[patch_deployments.CreatePatchDeploymentRequest],
patch_deployments.PatchDeployment,
@@ -340,7 +405,7 @@ def create_patch_deployment(
@property
def get_patch_deployment(
- self
+ self,
) -> Callable[
[patch_deployments.GetPatchDeploymentRequest], patch_deployments.PatchDeployment
]:
@@ -368,7 +433,7 @@ def get_patch_deployment(
@property
def list_patch_deployments(
- self
+ self,
) -> Callable[
[patch_deployments.ListPatchDeploymentsRequest],
patch_deployments.ListPatchDeploymentsResponse,
@@ -397,7 +462,7 @@ def list_patch_deployments(
@property
def delete_patch_deployment(
- self
+ self,
) -> Callable[[patch_deployments.DeletePatchDeploymentRequest], empty.Empty]:
r"""Return a callable for the delete patch deployment method over gRPC.
diff --git a/google/cloud/osconfig_v1/services/os_config_service/transports/grpc_asyncio.py b/google/cloud/osconfig_v1/services/os_config_service/transports/grpc_asyncio.py
index c2601c9..3886d69 100644
--- a/google/cloud/osconfig_v1/services/os_config_service/transports/grpc_asyncio.py
+++ b/google/cloud/osconfig_v1/services/os_config_service/transports/grpc_asyncio.py
@@ -15,9 +15,12 @@
# limitations under the License.
#
+import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
+from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
@@ -28,7 +31,7 @@
from google.cloud.osconfig_v1.types import patch_jobs
from google.protobuf import empty_pb2 as empty # type: ignore
-from .base import OsConfigServiceTransport
+from .base import OsConfigServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import OsConfigServiceGrpcTransport
@@ -56,8 +59,10 @@ def create_channel(
cls,
host: str = "osconfig.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
- **kwargs
+ quota_project_id: Optional[str] = None,
+ **kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
@@ -67,9 +72,14 @@ def create_channel(
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
@@ -77,7 +87,12 @@ def create_channel(
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
- host, credentials=credentials, scopes=scopes, **kwargs
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
)
def __init__(
@@ -85,9 +100,14 @@ def __init__(
*,
host: str = "osconfig.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
@@ -99,21 +119,40 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
- api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
- provided, it overrides the ``host`` argument and tries to create
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
- callback to provide client SSL certificate bytes and private key
- bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
- is None.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
Raises:
- google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -121,13 +160,24 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
+ warnings.warn(
+ "api_mtls_endpoint and client_cert_source are deprecated",
+ DeprecationWarning,
+ )
+
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
@@ -142,12 +192,40 @@ def __init__(
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
+ credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
- scopes=self.AUTH_SCOPES,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
)
# Run the base constructor.
- super().__init__(host=host, credentials=credentials)
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
self._stubs = {}
@property
@@ -157,19 +235,12 @@ def grpc_channel(self) -> aio.Channel:
This property caches on the instance; repeated calls return
the same channel.
"""
- # Sanity check: Only create a new channel if we do not already
- # have one.
- if not hasattr(self, "_grpc_channel"):
- self._grpc_channel = self.create_channel(
- self._host, credentials=self._credentials
- )
-
# Return the channel from cache.
return self._grpc_channel
@property
def execute_patch_job(
- self
+ self,
) -> Callable[[patch_jobs.ExecutePatchJobRequest], Awaitable[patch_jobs.PatchJob]]:
r"""Return a callable for the execute patch job method over gRPC.
@@ -196,7 +267,7 @@ def execute_patch_job(
@property
def get_patch_job(
- self
+ self,
) -> Callable[[patch_jobs.GetPatchJobRequest], Awaitable[patch_jobs.PatchJob]]:
r"""Return a callable for the get patch job method over gRPC.
@@ -224,7 +295,7 @@ def get_patch_job(
@property
def cancel_patch_job(
- self
+ self,
) -> Callable[[patch_jobs.CancelPatchJobRequest], Awaitable[patch_jobs.PatchJob]]:
r"""Return a callable for the cancel patch job method over gRPC.
@@ -251,7 +322,7 @@ def cancel_patch_job(
@property
def list_patch_jobs(
- self
+ self,
) -> Callable[
[patch_jobs.ListPatchJobsRequest], Awaitable[patch_jobs.ListPatchJobsResponse]
]:
@@ -279,7 +350,7 @@ def list_patch_jobs(
@property
def list_patch_job_instance_details(
- self
+ self,
) -> Callable[
[patch_jobs.ListPatchJobInstanceDetailsRequest],
Awaitable[patch_jobs.ListPatchJobInstanceDetailsResponse],
@@ -311,7 +382,7 @@ def list_patch_job_instance_details(
@property
def create_patch_deployment(
- self
+ self,
) -> Callable[
[patch_deployments.CreatePatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
@@ -340,7 +411,7 @@ def create_patch_deployment(
@property
def get_patch_deployment(
- self
+ self,
) -> Callable[
[patch_deployments.GetPatchDeploymentRequest],
Awaitable[patch_deployments.PatchDeployment],
@@ -369,7 +440,7 @@ def get_patch_deployment(
@property
def list_patch_deployments(
- self
+ self,
) -> Callable[
[patch_deployments.ListPatchDeploymentsRequest],
Awaitable[patch_deployments.ListPatchDeploymentsResponse],
@@ -398,7 +469,7 @@ def list_patch_deployments(
@property
def delete_patch_deployment(
- self
+ self,
) -> Callable[
[patch_deployments.DeletePatchDeploymentRequest], Awaitable[empty.Empty]
]:
diff --git a/google/cloud/osconfig_v1/types/__init__.py b/google/cloud/osconfig_v1/types/__init__.py
index 26ee20e..957e7b3 100644
--- a/google/cloud/osconfig_v1/types/__init__.py
+++ b/google/cloud/osconfig_v1/types/__init__.py
@@ -15,6 +15,7 @@
# limitations under the License.
#
+from .osconfig_common import FixedOrPercent
from .patch_jobs import (
ExecutePatchJobRequest,
GetPatchJobRequest,
@@ -36,6 +37,7 @@
ExecStepConfig,
GcsObject,
PatchInstanceFilter,
+ PatchRollout,
)
from .patch_deployments import (
PatchDeployment,
@@ -50,9 +52,10 @@
ListPatchDeploymentsResponse,
DeletePatchDeploymentRequest,
)
-
+from .inventory import Inventory
__all__ = (
+ "FixedOrPercent",
"ExecutePatchJobRequest",
"GetPatchJobRequest",
"ListPatchJobInstanceDetailsRequest",
@@ -73,6 +76,7 @@
"ExecStepConfig",
"GcsObject",
"PatchInstanceFilter",
+ "PatchRollout",
"PatchDeployment",
"OneTimeSchedule",
"RecurringSchedule",
@@ -84,4 +88,5 @@
"ListPatchDeploymentsRequest",
"ListPatchDeploymentsResponse",
"DeletePatchDeploymentRequest",
+ "Inventory",
)
diff --git a/google/cloud/osconfig_v1/types/inventory.py b/google/cloud/osconfig_v1/types/inventory.py
new file mode 100644
index 0000000..bb25196
--- /dev/null
+++ b/google/cloud/osconfig_v1/types/inventory.py
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.osconfig.v1", manifest={"Inventory",},
+)
+
+
+class Inventory(proto.Message):
+ r"""The inventory details of a VM.
+
+ Attributes:
+ os_info (~.inventory.Inventory.OsInfo):
+ Base level operating system information for
+ the VM.
+ items (Sequence[~.inventory.Inventory.ItemsEntry]):
+ Inventory items related to the VM keyed by an
+ opaque unique identifier for each inventory
+ item. The identifier is unique to each distinct
+ and addressable inventory item and will change,
+ when there is a new package version.
+ """
+
+ class OsInfo(proto.Message):
+ r"""Operating system information for the VM.
+
+ Attributes:
+ hostname (str):
+ The VM hostname.
+ long_name (str):
+ The operating system long name.
+ For example 'Debian GNU/Linux 9' or 'Microsoft
+ Window Server 2019 Datacenter'.
+ short_name (str):
+ The operating system short name.
+ For example, 'windows' or 'debian'.
+ version (str):
+ The version of the operating system.
+ architecture (str):
+ The system architecture of the operating
+ system.
+ kernel_version (str):
+ The kernel version of the operating system.
+ kernel_release (str):
+ The kernel release of the operating system.
+ osconfig_agent_version (str):
+ The current version of the OS Config agent
+ running on the VM.
+ """
+
+ hostname = proto.Field(proto.STRING, number=9)
+
+ long_name = proto.Field(proto.STRING, number=2)
+
+ short_name = proto.Field(proto.STRING, number=3)
+
+ version = proto.Field(proto.STRING, number=4)
+
+ architecture = proto.Field(proto.STRING, number=5)
+
+ kernel_version = proto.Field(proto.STRING, number=6)
+
+ kernel_release = proto.Field(proto.STRING, number=7)
+
+ osconfig_agent_version = proto.Field(proto.STRING, number=8)
+
+ class Item(proto.Message):
+ r"""A single piece of inventory on a VM.
+
+ Attributes:
+ id (str):
+ Identifier for this item, unique across items
+ for this VM.
+ origin_type (~.inventory.Inventory.Item.OriginType):
+ The origin of this inventory item.
+ create_time (~.timestamp.Timestamp):
+ When this inventory item was first detected.
+ update_time (~.timestamp.Timestamp):
+ When this inventory item was last modified.
+ type_ (~.inventory.Inventory.Item.Type):
+ The specific type of inventory, correlating
+ to its specific details.
+ installed_package (~.inventory.Inventory.SoftwarePackage):
+ Software package present on the VM instance.
+ available_package (~.inventory.Inventory.SoftwarePackage):
+ Software package available to be installed on
+ the VM instance.
+ """
+
+ class OriginType(proto.Enum):
+ r"""The origin of a specific inventory item."""
+ ORIGIN_TYPE_UNSPECIFIED = 0
+ INVENTORY_REPORT = 1
+
+ class Type(proto.Enum):
+ r"""The different types of inventory that are tracked on a VM."""
+ TYPE_UNSPECIFIED = 0
+ INSTALLED_PACKAGE = 1
+ AVAILABLE_PACKAGE = 2
+
+ id = proto.Field(proto.STRING, number=1)
+
+ origin_type = proto.Field(
+ proto.ENUM, number=2, enum="Inventory.Item.OriginType",
+ )
+
+ create_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
+
+ type_ = proto.Field(proto.ENUM, number=5, enum="Inventory.Item.Type",)
+
+ installed_package = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ oneof="details",
+ message="Inventory.SoftwarePackage",
+ )
+
+ available_package = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ oneof="details",
+ message="Inventory.SoftwarePackage",
+ )
+
+ class SoftwarePackage(proto.Message):
+ r"""Software package information of the operating system.
+
+ Attributes:
+ yum_package (~.inventory.Inventory.VersionedPackage):
+ Yum package info. For details about the yum package manager,
+ see
+ https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/ch-yum.
+ apt_package (~.inventory.Inventory.VersionedPackage):
+ Details of an APT package.
+ For details about the apt package manager, see
+ https://wiki.debian.org/Apt.
+ zypper_package (~.inventory.Inventory.VersionedPackage):
+ Details of a Zypper package. For details about the Zypper
+ package manager, see
+ https://en.opensuse.org/SDB:Zypper_manual.
+ googet_package (~.inventory.Inventory.VersionedPackage):
+ Details of a Googet package.
+ For details about the googet package manager,
+ see https://github.com/google/googet.
+ zypper_patch (~.inventory.Inventory.ZypperPatch):
+ Details of a Zypper patch. For details about the Zypper
+ package manager, see
+ https://en.opensuse.org/SDB:Zypper_manual.
+ wua_package (~.inventory.Inventory.WindowsUpdatePackage):
+ Details of a Windows Update package. See
+ https://docs.microsoft.com/en-us/windows/win32/api/_wua/ for
+ information about Windows Update.
+ qfe_package (~.inventory.Inventory.WindowsQuickFixEngineeringPackage):
+ Details of a Windows Quick Fix engineering
+ package. See
+ https://docs.microsoft.com/en-
+ us/windows/win32/cimwin32prov/win32-quickfixengineering
+ for info in Windows Quick Fix Engineering.
+ cos_package (~.inventory.Inventory.VersionedPackage):
+ Details of a COS package.
+ """
+
+ yum_package = proto.Field(
+ proto.MESSAGE,
+ number=1,
+ oneof="details",
+ message="Inventory.VersionedPackage",
+ )
+
+ apt_package = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ oneof="details",
+ message="Inventory.VersionedPackage",
+ )
+
+ zypper_package = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ oneof="details",
+ message="Inventory.VersionedPackage",
+ )
+
+ googet_package = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ oneof="details",
+ message="Inventory.VersionedPackage",
+ )
+
+ zypper_patch = proto.Field(
+ proto.MESSAGE, number=5, oneof="details", message="Inventory.ZypperPatch",
+ )
+
+ wua_package = proto.Field(
+ proto.MESSAGE,
+ number=6,
+ oneof="details",
+ message="Inventory.WindowsUpdatePackage",
+ )
+
+ qfe_package = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ oneof="details",
+ message="Inventory.WindowsQuickFixEngineeringPackage",
+ )
+
+ cos_package = proto.Field(
+ proto.MESSAGE,
+ number=8,
+ oneof="details",
+ message="Inventory.VersionedPackage",
+ )
+
+ class VersionedPackage(proto.Message):
+ r"""Information related to the a standard versioned package.
+ This includes package info for APT, Yum, Zypper, and Googet
+ package managers.
+
+ Attributes:
+ package_name (str):
+ The name of the package.
+ architecture (str):
+ The system architecture this package is
+ intended for.
+ version (str):
+ The version of the package.
+ """
+
+ package_name = proto.Field(proto.STRING, number=4)
+
+ architecture = proto.Field(proto.STRING, number=2)
+
+ version = proto.Field(proto.STRING, number=3)
+
+ class WindowsUpdatePackage(proto.Message):
+ r"""Details related to a Windows Update package. Field data and names
+ are taken from Windows Update API IUpdate Interface:
+ https://docs.microsoft.com/en-us/windows/win32/api/_wua/ Descriptive
+ fields like title, and description are localized based on the locale
+ of the VM being updated.
+
+ Attributes:
+ title (str):
+ The localized title of the update package.
+ description (str):
+ The localized description of the update
+ package.
+ categories (Sequence[~.inventory.Inventory.WindowsUpdatePackage.WindowsUpdateCategory]):
+ The categories that are associated with this
+ update package.
+ kb_article_ids (Sequence[str]):
+ A collection of Microsoft Knowledge Base
+ article IDs that are associated with the update
+ package.
+ support_url (str):
+ A hyperlink to the language-specific support
+ information for the update.
+ more_info_urls (Sequence[str]):
+ A collection of URLs that provide more
+ information about the update package.
+ update_id (str):
+ Gets the identifier of an update package.
+ Stays the same across revisions.
+ revision_number (int):
+ The revision number of this update package.
+ last_deployment_change_time (~.timestamp.Timestamp):
+ The last published date of the update, in
+ (UTC) date and time.
+ """
+
+ class WindowsUpdateCategory(proto.Message):
+ r"""Categories specified by the Windows Update.
+
+ Attributes:
+ id (str):
+ The identifier of the windows update
+ category.
+ name (str):
+ The name of the windows update category.
+ """
+
+ id = proto.Field(proto.STRING, number=1)
+
+ name = proto.Field(proto.STRING, number=2)
+
+ title = proto.Field(proto.STRING, number=1)
+
+ description = proto.Field(proto.STRING, number=2)
+
+ categories = proto.RepeatedField(
+ proto.MESSAGE,
+ number=3,
+ message="Inventory.WindowsUpdatePackage.WindowsUpdateCategory",
+ )
+
+ kb_article_ids = proto.RepeatedField(proto.STRING, number=4)
+
+ support_url = proto.Field(proto.STRING, number=11)
+
+ more_info_urls = proto.RepeatedField(proto.STRING, number=5)
+
+ update_id = proto.Field(proto.STRING, number=6)
+
+ revision_number = proto.Field(proto.INT32, number=7)
+
+ last_deployment_change_time = proto.Field(
+ proto.MESSAGE, number=10, message=timestamp.Timestamp,
+ )
+
+ class ZypperPatch(proto.Message):
+ r"""Details related to a Zypper Patch.
+
+ Attributes:
+ patch_name (str):
+ The name of the patch.
+ category (str):
+ The category of the patch.
+ severity (str):
+ The severity specified for this patch
+ summary (str):
+ Any summary information provided about this
+ patch.
+ """
+
+ patch_name = proto.Field(proto.STRING, number=5)
+
+ category = proto.Field(proto.STRING, number=2)
+
+ severity = proto.Field(proto.STRING, number=3)
+
+ summary = proto.Field(proto.STRING, number=4)
+
+ class WindowsQuickFixEngineeringPackage(proto.Message):
+ r"""Information related to a Quick Fix Engineering package.
+ Fields are taken from Windows QuickFixEngineering Interface and
+ match the source names:
+ https://docs.microsoft.com/en-
+ us/windows/win32/cimwin32prov/win32-quickfixengineering
+
+ Attributes:
+ caption (str):
+ A short textual description of the QFE
+ update.
+ description (str):
+ A textual description of the QFE update.
+ hot_fix_id (str):
+ Unique identifier associated with a
+ particular QFE update.
+ install_time (~.timestamp.Timestamp):
+ Date that the QFE update was installed. Mapped from
+ installed_on field.
+ """
+
+ caption = proto.Field(proto.STRING, number=1)
+
+ description = proto.Field(proto.STRING, number=2)
+
+ hot_fix_id = proto.Field(proto.STRING, number=3)
+
+ install_time = proto.Field(
+ proto.MESSAGE, number=5, message=timestamp.Timestamp,
+ )
+
+ os_info = proto.Field(proto.MESSAGE, number=1, message=OsInfo,)
+
+ items = proto.MapField(proto.STRING, proto.MESSAGE, number=2, message=Item,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/osconfig_v1/types/osconfig_common.py b/google/cloud/osconfig_v1/types/osconfig_common.py
new file mode 100644
index 0000000..d960126
--- /dev/null
+++ b/google/cloud/osconfig_v1/types/osconfig_common.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import proto # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.osconfig.v1", manifest={"FixedOrPercent",},
+)
+
+
+class FixedOrPercent(proto.Message):
+ r"""Message encapsulating a value that can be either absolute
+ ("fixed") or relative ("percent") to a value.
+
+ Attributes:
+ fixed (int):
+ Specifies a fixed value.
+ percent (int):
+ Specifies the relative value defined as a
+ percentage, which will be multiplied by a
+ reference value.
+ """
+
+ fixed = proto.Field(proto.INT32, number=1, oneof="mode")
+
+ percent = proto.Field(proto.INT32, number=2, oneof="mode")
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/osconfig_v1/types/osconfig_service.py b/google/cloud/osconfig_v1/types/osconfig_service.py
index 4ca6aee..af4e143 100644
--- a/google/cloud/osconfig_v1/types/osconfig_service.py
+++ b/google/cloud/osconfig_v1/types/osconfig_service.py
@@ -16,7 +16,7 @@
#
-__protobuf__ = proto.module(package="google.cloud.osconfig.v1", manifest={})
+__protobuf__ = proto.module(package="google.cloud.osconfig.v1", manifest={},)
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/osconfig_v1/types/patch_deployments.py b/google/cloud/osconfig_v1/types/patch_deployments.py
index 9768def..27982ee 100644
--- a/google/cloud/osconfig_v1/types/patch_deployments.py
+++ b/google/cloud/osconfig_v1/types/patch_deployments.py
@@ -82,32 +82,47 @@ class PatchDeployment(proto.Message):
update_time (~.timestamp.Timestamp):
Output only. Time the patch deployment was last updated.
Timestamp is in
- `RFC3339 <"https://www.ietf.org/rfc/rfc3339.txt>`__ text
+ `RFC3339 `__ text
format.
last_execute_time (~.timestamp.Timestamp):
Output only. The last time a patch job was started by this
deployment. Timestamp is in
`RFC3339 `__ text
format.
+ rollout (~.patch_jobs.PatchRollout):
+ Optional. Rollout strategy of the patch job.
"""
name = proto.Field(proto.STRING, number=1)
+
description = proto.Field(proto.STRING, number=2)
+
instance_filter = proto.Field(
- proto.MESSAGE, number=3, message=patch_jobs.PatchInstanceFilter
+ proto.MESSAGE, number=3, message=patch_jobs.PatchInstanceFilter,
+ )
+
+ patch_config = proto.Field(proto.MESSAGE, number=4, message=patch_jobs.PatchConfig,)
+
+ duration = proto.Field(proto.MESSAGE, number=5, message=gp_duration.Duration,)
+
+ one_time_schedule = proto.Field(
+ proto.MESSAGE, number=6, oneof="schedule", message="OneTimeSchedule",
)
- patch_config = proto.Field(proto.MESSAGE, number=4, message=patch_jobs.PatchConfig)
- duration = proto.Field(proto.MESSAGE, number=5, message=gp_duration.Duration)
- one_time_schedule = proto.Field(proto.MESSAGE, number=6, message="OneTimeSchedule")
+
recurring_schedule = proto.Field(
- proto.MESSAGE, number=7, message="RecurringSchedule"
+ proto.MESSAGE, number=7, oneof="schedule", message="RecurringSchedule",
)
- create_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp)
- update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp)
+
+ create_time = proto.Field(proto.MESSAGE, number=8, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=9, message=timestamp.Timestamp,)
+
last_execute_time = proto.Field(
- proto.MESSAGE, number=10, message=timestamp.Timestamp
+ proto.MESSAGE, number=10, message=timestamp.Timestamp,
)
+ rollout = proto.Field(proto.MESSAGE, number=11, message=patch_jobs.PatchRollout,)
+
class OneTimeSchedule(proto.Message):
r"""Sets the time for a one time patch deployment. Timestamp is in
@@ -119,7 +134,7 @@ class OneTimeSchedule(proto.Message):
time.
"""
- execute_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp)
+ execute_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,)
class RecurringSchedule(proto.Message):
@@ -161,18 +176,30 @@ class Frequency(proto.Enum):
WEEKLY = 1
MONTHLY = 2
- time_zone = proto.Field(proto.MESSAGE, number=1, message=datetime.TimeZone)
- start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp)
- end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp)
- time_of_day = proto.Field(proto.MESSAGE, number=4, message=timeofday.TimeOfDay)
- frequency = proto.Field(proto.ENUM, number=5, enum=Frequency)
- weekly = proto.Field(proto.MESSAGE, number=6, message="WeeklySchedule")
- monthly = proto.Field(proto.MESSAGE, number=7, message="MonthlySchedule")
+ time_zone = proto.Field(proto.MESSAGE, number=1, message=datetime.TimeZone,)
+
+ start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+ time_of_day = proto.Field(proto.MESSAGE, number=4, message=timeofday.TimeOfDay,)
+
+ frequency = proto.Field(proto.ENUM, number=5, enum=Frequency,)
+
+ weekly = proto.Field(
+ proto.MESSAGE, number=6, oneof="schedule_config", message="WeeklySchedule",
+ )
+
+ monthly = proto.Field(
+ proto.MESSAGE, number=7, oneof="schedule_config", message="MonthlySchedule",
+ )
+
last_execute_time = proto.Field(
- proto.MESSAGE, number=9, message=timestamp.Timestamp
+ proto.MESSAGE, number=9, message=timestamp.Timestamp,
)
+
next_execute_time = proto.Field(
- proto.MESSAGE, number=10, message=timestamp.Timestamp
+ proto.MESSAGE, number=10, message=timestamp.Timestamp,
)
@@ -184,7 +211,7 @@ class WeeklySchedule(proto.Message):
Required. Day of the week.
"""
- day_of_week = proto.Field(proto.ENUM, number=1, enum=dayofweek.DayOfWeek)
+ day_of_week = proto.Field(proto.ENUM, number=1, enum=dayofweek.DayOfWeek,)
class MonthlySchedule(proto.Message):
@@ -204,8 +231,11 @@ class MonthlySchedule(proto.Message):
not run in February, April, June, etc.
"""
- week_day_of_month = proto.Field(proto.MESSAGE, number=1, message="WeekDayOfMonth")
- month_day = proto.Field(proto.INT32, number=2)
+ week_day_of_month = proto.Field(
+ proto.MESSAGE, number=1, oneof="day_of_month", message="WeekDayOfMonth",
+ )
+
+ month_day = proto.Field(proto.INT32, number=2, oneof="day_of_month")
class WeekDayOfMonth(proto.Message):
@@ -222,7 +252,8 @@ class WeekDayOfMonth(proto.Message):
"""
week_ordinal = proto.Field(proto.INT32, number=1)
- day_of_week = proto.Field(proto.ENUM, number=2, enum=dayofweek.DayOfWeek)
+
+ day_of_week = proto.Field(proto.ENUM, number=2, enum=dayofweek.DayOfWeek,)
class CreatePatchDeploymentRequest(proto.Message):
@@ -247,8 +278,10 @@ class CreatePatchDeploymentRequest(proto.Message):
"""
parent = proto.Field(proto.STRING, number=1)
+
patch_deployment_id = proto.Field(proto.STRING, number=2)
- patch_deployment = proto.Field(proto.MESSAGE, number=3, message=PatchDeployment)
+
+ patch_deployment = proto.Field(proto.MESSAGE, number=3, message="PatchDeployment",)
class GetPatchDeploymentRequest(proto.Message):
@@ -281,7 +314,9 @@ class ListPatchDeploymentsRequest(proto.Message):
"""
parent = proto.Field(proto.STRING, number=1)
+
page_size = proto.Field(proto.INT32, number=2)
+
page_token = proto.Field(proto.STRING, number=3)
@@ -301,8 +336,9 @@ def raw_page(self):
return self
patch_deployments = proto.RepeatedField(
- proto.MESSAGE, number=1, message=PatchDeployment
+ proto.MESSAGE, number=1, message="PatchDeployment",
)
+
next_page_token = proto.Field(proto.STRING, number=2)
diff --git a/google/cloud/osconfig_v1/types/patch_jobs.py b/google/cloud/osconfig_v1/types/patch_jobs.py
index a5b271c..35fec12 100644
--- a/google/cloud/osconfig_v1/types/patch_jobs.py
+++ b/google/cloud/osconfig_v1/types/patch_jobs.py
@@ -18,6 +18,7 @@
import proto # type: ignore
+from google.cloud.osconfig_v1.types import osconfig_common
from google.protobuf import duration_pb2 as gp_duration # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
@@ -45,6 +46,7 @@
"ExecStepConfig",
"GcsObject",
"PatchInstanceFilter",
+ "PatchRollout",
},
)
@@ -77,18 +79,28 @@ class ExecutePatchJobRequest(proto.Message):
display_name (str):
Display name for this patch job. This does
not have to be unique.
+ rollout (~.gco_patch_jobs.PatchRollout):
+ Rollout strategy of the patch job.
"""
parent = proto.Field(proto.STRING, number=1)
+
description = proto.Field(proto.STRING, number=2)
+
instance_filter = proto.Field(
- proto.MESSAGE, number=7, message="PatchInstanceFilter"
+ proto.MESSAGE, number=7, message="PatchInstanceFilter",
)
- patch_config = proto.Field(proto.MESSAGE, number=4, message="PatchConfig")
- duration = proto.Field(proto.MESSAGE, number=5, message=gp_duration.Duration)
+
+ patch_config = proto.Field(proto.MESSAGE, number=4, message="PatchConfig",)
+
+ duration = proto.Field(proto.MESSAGE, number=5, message=gp_duration.Duration,)
+
dry_run = proto.Field(proto.BOOL, number=6)
+
display_name = proto.Field(proto.STRING, number=8)
+ rollout = proto.Field(proto.MESSAGE, number=9, message="PatchRollout",)
+
class GetPatchJobRequest(proto.Message):
r"""Request to get an active or completed patch job.
@@ -124,8 +136,11 @@ class ListPatchJobInstanceDetailsRequest(proto.Message):
"""
parent = proto.Field(proto.STRING, number=1)
+
page_size = proto.Field(proto.INT32, number=2)
+
page_token = proto.Field(proto.STRING, number=3)
+
filter = proto.Field(proto.STRING, number=4)
@@ -146,8 +161,9 @@ def raw_page(self):
return self
patch_job_instance_details = proto.RepeatedField(
- proto.MESSAGE, number=1, message="PatchJobInstanceDetails"
+ proto.MESSAGE, number=1, message="PatchJobInstanceDetails",
)
+
next_page_token = proto.Field(proto.STRING, number=2)
@@ -175,9 +191,13 @@ class PatchJobInstanceDetails(proto.Message):
"""
name = proto.Field(proto.STRING, number=1)
+
instance_system_id = proto.Field(proto.STRING, number=2)
- state = proto.Field(proto.ENUM, number=3, enum="Instance.PatchState")
+
+ state = proto.Field(proto.ENUM, number=3, enum="Instance.PatchState",)
+
failure_reason = proto.Field(proto.STRING, number=4)
+
attempt_count = proto.Field(proto.INT64, number=5)
@@ -201,8 +221,11 @@ class ListPatchJobsRequest(proto.Message):
"""
parent = proto.Field(proto.STRING, number=1)
+
page_size = proto.Field(proto.INT32, number=2)
+
page_token = proto.Field(proto.STRING, number=3)
+
filter = proto.Field(proto.STRING, number=4)
@@ -221,7 +244,8 @@ class ListPatchJobsResponse(proto.Message):
def raw_page(self):
return self
- patch_jobs = proto.RepeatedField(proto.MESSAGE, number=1, message="PatchJob")
+ patch_jobs = proto.RepeatedField(proto.MESSAGE, number=1, message="PatchJob",)
+
next_page_token = proto.Field(proto.STRING, number=2)
@@ -229,7 +253,7 @@ class PatchJob(proto.Message):
r"""A high level representation of a patch job that is either in
progress or has completed.
- Instances details are not included in the job. To paginate through
+ Instance details are not included in the job. To paginate through
instance details, use ListPatchJobInstanceDetails.
For more information about patch jobs, see `Creating patch
@@ -250,7 +274,7 @@ class PatchJob(proto.Message):
update_time (~.timestamp.Timestamp):
Last time this patch job was updated.
state (~.gco_patch_jobs.PatchJob.State):
- The current state of the PatchJob .
+ The current state of the PatchJob.
instance_filter (~.gco_patch_jobs.PatchInstanceFilter):
Instances to patch.
patch_config (~.gco_patch_jobs.PatchConfig):
@@ -274,6 +298,8 @@ class PatchJob(proto.Message):
patch_deployment (str):
Output only. Name of the patch deployment
that created this patch job.
+ rollout (~.gco_patch_jobs.PatchRollout):
+ Rollout strategy being applied.
"""
class State(proto.Enum):
@@ -339,40 +365,69 @@ class InstanceDetailsSummary(proto.Message):
"""
pending_instance_count = proto.Field(proto.INT64, number=1)
+
inactive_instance_count = proto.Field(proto.INT64, number=2)
+
notified_instance_count = proto.Field(proto.INT64, number=3)
+
started_instance_count = proto.Field(proto.INT64, number=4)
+
downloading_patches_instance_count = proto.Field(proto.INT64, number=5)
+
applying_patches_instance_count = proto.Field(proto.INT64, number=6)
+
rebooting_instance_count = proto.Field(proto.INT64, number=7)
+
succeeded_instance_count = proto.Field(proto.INT64, number=8)
+
succeeded_reboot_required_instance_count = proto.Field(proto.INT64, number=9)
+
failed_instance_count = proto.Field(proto.INT64, number=10)
+
acked_instance_count = proto.Field(proto.INT64, number=11)
+
timed_out_instance_count = proto.Field(proto.INT64, number=12)
+
pre_patch_step_instance_count = proto.Field(proto.INT64, number=13)
+
post_patch_step_instance_count = proto.Field(proto.INT64, number=14)
+
no_agent_detected_instance_count = proto.Field(proto.INT64, number=15)
name = proto.Field(proto.STRING, number=1)
+
display_name = proto.Field(proto.STRING, number=14)
+
description = proto.Field(proto.STRING, number=2)
- create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp)
- update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp)
- state = proto.Field(proto.ENUM, number=5, enum=State)
+
+ create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=4, message=timestamp.Timestamp,)
+
+ state = proto.Field(proto.ENUM, number=5, enum=State,)
+
instance_filter = proto.Field(
- proto.MESSAGE, number=13, message="PatchInstanceFilter"
+ proto.MESSAGE, number=13, message="PatchInstanceFilter",
)
- patch_config = proto.Field(proto.MESSAGE, number=7, message="PatchConfig")
- duration = proto.Field(proto.MESSAGE, number=8, message=gp_duration.Duration)
+
+ patch_config = proto.Field(proto.MESSAGE, number=7, message="PatchConfig",)
+
+ duration = proto.Field(proto.MESSAGE, number=8, message=gp_duration.Duration,)
+
instance_details_summary = proto.Field(
- proto.MESSAGE, number=9, message=InstanceDetailsSummary
+ proto.MESSAGE, number=9, message=InstanceDetailsSummary,
)
+
dry_run = proto.Field(proto.BOOL, number=10)
+
error_message = proto.Field(proto.STRING, number=11)
+
percent_complete = proto.Field(proto.DOUBLE, number=12)
+
patch_deployment = proto.Field(proto.STRING, number=15)
+ rollout = proto.Field(proto.MESSAGE, number=16, message="PatchRollout",)
+
class PatchConfig(proto.Message):
r"""Patch configuration specifications. Contains details on how
@@ -409,16 +464,23 @@ class RebootConfig(proto.Enum):
ALWAYS = 2
NEVER = 3
- reboot_config = proto.Field(proto.ENUM, number=1, enum=RebootConfig)
- apt = proto.Field(proto.MESSAGE, number=3, message="AptSettings")
- yum = proto.Field(proto.MESSAGE, number=4, message="YumSettings")
- goo = proto.Field(proto.MESSAGE, number=5, message="GooSettings")
- zypper = proto.Field(proto.MESSAGE, number=6, message="ZypperSettings")
+ reboot_config = proto.Field(proto.ENUM, number=1, enum=RebootConfig,)
+
+ apt = proto.Field(proto.MESSAGE, number=3, message="AptSettings",)
+
+ yum = proto.Field(proto.MESSAGE, number=4, message="YumSettings",)
+
+ goo = proto.Field(proto.MESSAGE, number=5, message="GooSettings",)
+
+ zypper = proto.Field(proto.MESSAGE, number=6, message="ZypperSettings",)
+
windows_update = proto.Field(
- proto.MESSAGE, number=7, message="WindowsUpdateSettings"
+ proto.MESSAGE, number=7, message="WindowsUpdateSettings",
)
- pre_step = proto.Field(proto.MESSAGE, number=8, message="ExecStep")
- post_step = proto.Field(proto.MESSAGE, number=9, message="ExecStep")
+
+ pre_step = proto.Field(proto.MESSAGE, number=8, message="ExecStep",)
+
+ post_step = proto.Field(proto.MESSAGE, number=9, message="ExecStep",)
class Instance(proto.Message):
@@ -462,7 +524,7 @@ class AptSettings(proto.Message):
to control how this is executed.
Attributes:
- type (~.gco_patch_jobs.AptSettings.Type):
+ type_ (~.gco_patch_jobs.AptSettings.Type):
By changing the type to DIST, the patching is performed
using ``apt-get dist-upgrade`` instead.
excludes (Sequence[str]):
@@ -483,8 +545,10 @@ class Type(proto.Enum):
DIST = 1
UPGRADE = 2
- type = proto.Field(proto.ENUM, number=1, enum=Type)
+ type_ = proto.Field(proto.ENUM, number=1, enum=Type,)
+
excludes = proto.RepeatedField(proto.STRING, number=2)
+
exclusive_packages = proto.RepeatedField(proto.STRING, number=3)
@@ -513,8 +577,11 @@ class YumSettings(proto.Message):
"""
security = proto.Field(proto.BOOL, number=1)
+
minimal = proto.Field(proto.BOOL, number=2)
+
excludes = proto.RepeatedField(proto.STRING, number=3)
+
exclusive_packages = proto.RepeatedField(proto.STRING, number=4)
@@ -549,10 +616,15 @@ class ZypperSettings(proto.Message):
"""
with_optional = proto.Field(proto.BOOL, number=1)
+
with_update = proto.Field(proto.BOOL, number=2)
+
categories = proto.RepeatedField(proto.STRING, number=3)
+
severities = proto.RepeatedField(proto.STRING, number=4)
+
excludes = proto.RepeatedField(proto.STRING, number=5)
+
exclusive_patches = proto.RepeatedField(proto.STRING, number=6)
@@ -588,8 +660,10 @@ class Classification(proto.Enum):
UPDATE_ROLLUP = 8
UPDATE = 9
- classifications = proto.RepeatedField(proto.ENUM, number=1, enum=Classification)
+ classifications = proto.RepeatedField(proto.ENUM, number=1, enum=Classification,)
+
excludes = proto.RepeatedField(proto.STRING, number=2)
+
exclusive_patches = proto.RepeatedField(proto.STRING, number=3)
@@ -606,10 +680,11 @@ class ExecStep(proto.Message):
"""
linux_exec_step_config = proto.Field(
- proto.MESSAGE, number=1, message="ExecStepConfig"
+ proto.MESSAGE, number=1, message="ExecStepConfig",
)
+
windows_exec_step_config = proto.Field(
- proto.MESSAGE, number=2, message="ExecStepConfig"
+ proto.MESSAGE, number=2, message="ExecStepConfig",
)
@@ -639,10 +714,15 @@ class Interpreter(proto.Enum):
SHELL = 1
POWERSHELL = 2
- local_path = proto.Field(proto.STRING, number=1)
- gcs_object = proto.Field(proto.MESSAGE, number=2, message="GcsObject")
+ local_path = proto.Field(proto.STRING, number=1, oneof="executable")
+
+ gcs_object = proto.Field(
+ proto.MESSAGE, number=2, oneof="executable", message="GcsObject",
+ )
+
allowed_success_codes = proto.RepeatedField(proto.INT32, number=3)
- interpreter = proto.Field(proto.ENUM, number=4, enum=Interpreter)
+
+ interpreter = proto.Field(proto.ENUM, number=4, enum=Interpreter,)
class GcsObject(proto.Message):
@@ -651,7 +731,7 @@ class GcsObject(proto.Message):
Attributes:
bucket (str):
Required. Bucket of the Cloud Storage object.
- object (str):
+ object_ (str):
Required. Name of the Cloud Storage object.
generation_number (int):
Required. Generation number of the Cloud
@@ -661,7 +741,9 @@ class GcsObject(proto.Message):
"""
bucket = proto.Field(proto.STRING, number=1)
- object = proto.Field(proto.STRING, number=2)
+
+ object_ = proto.Field(proto.STRING, number=2)
+
generation_number = proto.Field(proto.INT64, number=3)
@@ -672,7 +754,7 @@ class PatchInstanceFilter(proto.Message):
labels and in those zones.
Attributes:
- all (bool):
+ all_ (bool):
Target all VM instances in the project. If
true, no other criteria is permitted.
group_labels (Sequence[~.gco_patch_jobs.PatchInstanceFilter.GroupLabel]):
@@ -685,7 +767,10 @@ class PatchInstanceFilter(proto.Message):
instances (Sequence[str]):
Targets any of the VM instances specified. Instances are
specified by their URI in the form
- ``zones/[ZONE]/instances/[INSTANCE_NAME],``\ projects/[PROJECT_ID]/zones/[ZONE]/instances/[INSTANCE_NAME]\ ``, or``\ https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/instances/[INSTANCE_NAME]\`
+ ``zones/[ZONE]/instances/[INSTANCE_NAME]``,
+ ``projects/[PROJECT_ID]/zones/[ZONE]/instances/[INSTANCE_NAME]``,
+ or
+ ``https://www.googleapis.com/compute/v1/projects/[PROJECT_ID]/zones/[ZONE]/instances/[INSTANCE_NAME]``
instance_name_prefixes (Sequence[str]):
Targets VMs whose name starts with one of
these prefixes. Similar to labels, this is
@@ -713,11 +798,67 @@ class GroupLabel(proto.Message):
labels = proto.MapField(proto.STRING, proto.STRING, number=1)
- all = proto.Field(proto.BOOL, number=1)
- group_labels = proto.RepeatedField(proto.MESSAGE, number=2, message=GroupLabel)
+ all_ = proto.Field(proto.BOOL, number=1)
+
+ group_labels = proto.RepeatedField(proto.MESSAGE, number=2, message=GroupLabel,)
+
zones = proto.RepeatedField(proto.STRING, number=3)
+
instances = proto.RepeatedField(proto.STRING, number=4)
+
instance_name_prefixes = proto.RepeatedField(proto.STRING, number=5)
+class PatchRollout(proto.Message):
+ r"""Patch rollout configuration specifications. Contains details
+ on the concurrency control when applying patch(es) to all
+ targeted VMs.
+
+ Attributes:
+ mode (~.gco_patch_jobs.PatchRollout.Mode):
+ Mode of the patch rollout.
+ disruption_budget (~.osconfig_common.FixedOrPercent):
+ The maximum number (or percentage) of VMs per zone to
+ disrupt at any given moment. The number of VMs calculated
+ from multiplying the percentage by the total number of VMs
+ in a zone is rounded up.
+
+ During patching, a VM is considered disrupted from the time
+ the agent is notified to begin until patching has completed.
+ This disruption time includes the time to complete reboot
+ and any post-patch steps.
+
+ A VM contributes to the disruption budget if its patching
+ operation fails either when applying the patches, running
+ pre or post patch steps, or if it fails to respond with a
+ success notification before timing out. VMs that are not
+ running or do not have an active agent do not count toward
+ this disruption budget.
+
+ For zone-by-zone rollouts, if the disruption budget in a
+ zone is exceeded, the patch job stops, because continuing to
+ the next zone requires completion of the patch process in
+ the previous zone.
+
+ For example, if the disruption budget has a fixed value of
+ ``10``, and 8 VMs fail to patch in the current zone, the
+ patch job continues to patch 2 VMs at a time until the zone
+ is completed. When that zone is completed successfully,
+ patching begins with 10 VMs at a time in the next zone. If
+ 10 VMs in the next zone fail to patch, the patch job stops.
+ """
+
+ class Mode(proto.Enum):
+ r"""Type of the rollout."""
+ MODE_UNSPECIFIED = 0
+ ZONE_BY_ZONE = 1
+ CONCURRENT_ZONES = 2
+
+ mode = proto.Field(proto.ENUM, number=1, enum=Mode,)
+
+ disruption_budget = proto.Field(
+ proto.MESSAGE, number=2, message=osconfig_common.FixedOrPercent,
+ )
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/noxfile.py b/noxfile.py
index a7fa38f..8e55141 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -23,12 +23,12 @@
import nox
-BLACK_VERSION = "black==19.3b0"
+BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
-DEFAULT_PYTHON_VERSION = "3.7"
-SYSTEM_TEST_PYTHON_VERSIONS = ["3.7"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8"]
+DEFAULT_PYTHON_VERSION = "3.8"
+SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
@nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -39,7 +39,9 @@ def lint(session):
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
- session.run("black", "--check", *BLACK_PATHS)
+ session.run(
+ "black", "--check", *BLACK_PATHS,
+ )
session.run("flake8", "google", "tests")
@@ -54,7 +56,9 @@ def blacken(session):
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install(BLACK_VERSION)
- session.run("black", *BLACK_PATHS)
+ session.run(
+ "black", *BLACK_PATHS,
+ )
@nox.session(python=DEFAULT_PYTHON_VERSION)
@@ -67,7 +71,10 @@ def lint_setup_py(session):
def default(session):
# Install all test dependencies, then install this package in-place.
session.install("asyncmock", "pytest-asyncio")
- session.install("mock", "pytest", "pytest-cov")
+
+ session.install(
+ "mock", "pytest", "pytest-cov",
+ )
session.install("-e", ".")
# Run py.test against the unit tests.
@@ -97,6 +104,10 @@ def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
+
+ # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
+ if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
+ session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
@@ -112,7 +123,9 @@ def system(session):
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
- session.install("mock", "pytest", "google-cloud-testutils")
+ session.install(
+ "mock", "pytest", "google-cloud-testutils",
+ )
session.install("-e", ".")
# Run py.test against the system tests.
@@ -155,3 +168,38 @@ def docs(session):
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ # sphinx-docfx-yaml supports up to sphinx version 1.5.5.
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97
+ session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml")
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh
index ff599eb..21f6d2a 100755
--- a/scripts/decrypt-secrets.sh
+++ b/scripts/decrypt-secrets.sh
@@ -20,14 +20,27 @@ ROOT=$( dirname "$DIR" )
# Work from the project root.
cd $ROOT
+# Prevent it from overriding files.
+# We recommend that sample authors use their own service account files and cloud project.
+# In that case, they are supposed to prepare these files by themselves.
+if [[ -f "testing/test-env.sh" ]] || \
+ [[ -f "testing/service-account.json" ]] || \
+ [[ -f "testing/client-secrets.json" ]]; then
+ echo "One or more target files exist, aborting."
+ exit 1
+fi
+
# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources.
PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}"
gcloud secrets versions access latest --secret="python-docs-samples-test-env" \
+ --project="${PROJECT_ID}" \
> testing/test-env.sh
gcloud secrets versions access latest \
--secret="python-docs-samples-service-account" \
+ --project="${PROJECT_ID}" \
> testing/service-account.json
gcloud secrets versions access latest \
--secret="python-docs-samples-client-secrets" \
- > testing/client-secrets.json
\ No newline at end of file
+ --project="${PROJECT_ID}" \
+ > testing/client-secrets.json
diff --git a/scripts/fixup_osconfig_v1_keywords.py b/scripts/fixup_osconfig_v1_keywords.py
new file mode 100644
index 0000000..9f7025c
--- /dev/null
+++ b/scripts/fixup_osconfig_v1_keywords.py
@@ -0,0 +1,187 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import argparse
+import os
+import libcst as cst
+import pathlib
+import sys
+from typing import (Any, Callable, Dict, List, Sequence, Tuple)
+
+
+def partition(
+ predicate: Callable[[Any], bool],
+ iterator: Sequence[Any]
+) -> Tuple[List[Any], List[Any]]:
+ """A stable, out-of-place partition."""
+ results = ([], [])
+
+ for i in iterator:
+ results[int(predicate(i))].append(i)
+
+ # Returns trueList, falseList
+ return results[1], results[0]
+
+
+class osconfigCallTransformer(cst.CSTTransformer):
+ CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
+ METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
+ 'cancel_patch_job': ('name', ),
+ 'create_patch_deployment': ('parent', 'patch_deployment_id', 'patch_deployment', ),
+ 'delete_patch_deployment': ('name', ),
+ 'execute_patch_job': ('parent', 'instance_filter', 'description', 'patch_config', 'duration', 'dry_run', 'display_name', 'rollout', ),
+ 'get_patch_deployment': ('name', ),
+ 'get_patch_job': ('name', ),
+ 'list_patch_deployments': ('parent', 'page_size', 'page_token', ),
+ 'list_patch_job_instance_details': ('parent', 'page_size', 'page_token', 'filter', ),
+ 'list_patch_jobs': ('parent', 'page_size', 'page_token', 'filter', ),
+
+ }
+
+ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
+ try:
+ key = original.func.attr.value
+ kword_params = self.METHOD_TO_PARAMS[key]
+ except (AttributeError, KeyError):
+ # Either not a method from the API or too convoluted to be sure.
+ return updated
+
+ # If the existing code is valid, keyword args come after positional args.
+ # Therefore, all positional args must map to the first parameters.
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+ if any(k.keyword.value == "request" for k in kwargs):
+ # We've already fixed this file, don't fix it again.
+ return updated
+
+ kwargs, ctrl_kwargs = partition(
+ lambda a: not a.keyword.value in self.CTRL_PARAMS,
+ kwargs
+ )
+
+ args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+ ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+ for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+ request_arg = cst.Arg(
+ value=cst.Dict([
+ cst.DictElement(
+ cst.SimpleString("'{}'".format(name)),
+ cst.Element(value=arg.value)
+ )
+ # Note: the args + kwargs looks silly, but keep in mind that
+ # the control parameters had to be stripped out, and that
+ # those could have been passed positionally or by keyword.
+ for name, arg in zip(kword_params, args + kwargs)]),
+ keyword=cst.Name("request")
+ )
+
+ return updated.with_changes(
+ args=[request_arg] + ctrl_kwargs
+ )
+
+
+def fix_files(
+ in_dir: pathlib.Path,
+ out_dir: pathlib.Path,
+ *,
+ transformer=osconfigCallTransformer(),
+):
+ """Duplicate the input dir to the output dir, fixing file method calls.
+
+ Preconditions:
+ * in_dir is a real directory
+ * out_dir is a real, empty directory
+ """
+ pyfile_gen = (
+ pathlib.Path(os.path.join(root, f))
+ for root, _, files in os.walk(in_dir)
+ for f in files if os.path.splitext(f)[1] == ".py"
+ )
+
+ for fpath in pyfile_gen:
+ with open(fpath, 'r') as f:
+ src = f.read()
+
+ # Parse the code and insert method call fixes.
+ tree = cst.parse_module(src)
+ updated = tree.visit(transformer)
+
+ # Create the path and directory structure for the new file.
+ updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+ updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Generate the updated source file at the corresponding path.
+ with open(updated_path, 'w') as f:
+ f.write(updated.code)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(
+ description="""Fix up source that uses the osconfig client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates at a best-effort level at converting positional
+ parameters in client method calls to keyword based parameters.
+ Cases where it WILL FAIL include
+ A) * or ** expansion in a method call.
+ B) Calls via function or method alias (includes free function calls)
+ C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+ These all constitute false negatives. The tool will also detect false
+ positives when an API method shares a name with another method.
+""")
+ parser.add_argument(
+ '-d',
+ '--input-directory',
+ required=True,
+ dest='input_dir',
+ help='the input directory to walk for python files to fix up',
+ )
+ parser.add_argument(
+ '-o',
+ '--output-directory',
+ required=True,
+ dest='output_dir',
+ help='the directory to output files fixed via un-flattening',
+ )
+ args = parser.parse_args()
+ input_dir = pathlib.Path(args.input_dir)
+ output_dir = pathlib.Path(args.output_dir)
+ if not input_dir.is_dir():
+ print(
+ f"input directory '{input_dir}' does not exist or is not a directory",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
+ if not output_dir.is_dir():
+ print(
+ f"output directory '{output_dir}' does not exist or is not a directory",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
+ if os.listdir(output_dir):
+ print(
+ f"output directory '{output_dir}' is not empty",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
+ fix_files(input_dir, output_dir)
diff --git a/setup.py b/setup.py
index 379f5bb..8fddd69 100644
--- a/setup.py
+++ b/setup.py
@@ -31,8 +31,8 @@
# 'Development Status :: 5 - Production/Stable'
release_status = "Development Status :: 4 - Beta"
dependencies = [
- "google-api-core[grpc] >= 1.17.2, < 2.0.0dev",
- "proto-plus >= 0.4.0",
+ "google-api-core[grpc] >= 1.22.0, < 2.0.0dev",
+ "proto-plus >= 1.10.0",
"libcst >= 0.2.5",
]
extras = {}
diff --git a/synth.metadata b/synth.metadata
index 020d69a..3a49120 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -3,30 +3,30 @@
{
"git": {
"name": ".",
- "remote": "https://github.com/googleapis/python-os-config.git",
- "sha": "b43ea51c28470874fdf5da026970202009d48651"
+ "remote": "git@github.com:googleapis/python-os-config",
+ "sha": "164c39adc34e029c814142c3a8dc43c22654a0b0"
}
},
{
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "ca1372c6d7bcb199638ebfdb40d2b2660bab7b88",
- "internalRef": "315548189"
+ "sha": "3f87da2ed1ddc3566ef0810c4fc06a2682cc9f5f",
+ "internalRef": "343022252"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170"
+ "sha": "7fcc405a579d5d53a726ff3da1b7c8c08f0f2d58"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "cd522c3b4dde821766d95c80ae5aeb43d7a41170"
+ "sha": "7fcc405a579d5d53a726ff3da1b7c8c08f0f2d58"
}
}
],
diff --git a/synth.py b/synth.py
index d850f7a..9ae99d4 100644
--- a/synth.py
+++ b/synth.py
@@ -31,25 +31,14 @@
s.move(library, excludes=["nox.py", "setup.py", "README.rst", "docs/index.rst"])
-# correct license headers
-python.fix_pb2_headers()
-python.fix_pb2_grpc_headers()
-
# rename to google-cloud-os-config
s.replace(["google/**/*.py", "tests/**/*.py"], "google-cloud-osconfig", "google-cloud-os-config")
# Add newline after last item in list
-s.replace("google/cloud/**/*_client.py",
+s.replace("google/cloud/**/*client.py",
"(- Must be unique within the project\.)",
"\g<1>\n")
-# Add missing blank line before Attributes: in generated docstrings
-# https://github.com/googleapis/protoc-docs-plugin/pull/31
-s.replace(
- "google/cloud/pubsub_v1/proto/pubsub_pb2.py",
- "(\s+)Attributes:",
- "\n\g<1>Attributes:"
-)
# ----------------------------------------------------------------------------
# Add templated files
@@ -57,8 +46,6 @@
templated_files = common.py_library(
samples=False,
microgenerator=True,
- unit_test_python_versions=["3.6", "3.7", "3.8"],
- system_test_python_versions=["3.7"],
)
s.move(
templated_files, excludes=[".coveragerc"]
diff --git a/tests/system/test_os_config_service_v1.py b/tests/system/test_os_config_service_v1.py
index 2bc3c37..c5ced16 100644
--- a/tests/system/test_os_config_service_v1.py
+++ b/tests/system/test_os_config_service_v1.py
@@ -30,7 +30,7 @@ def test_patch_job(self):
request = patch_jobs.ExecutePatchJobRequest(
parent=f"projects/{project_id}",
description="Python Client Library System Test",
- instance_filter=patch_jobs.PatchInstanceFilter(all=True),
+ instance_filter=patch_jobs.PatchInstanceFilter(all_=True),
)
patch_job = client.execute_patch_job(request)
assert patch_job is not None
@@ -60,7 +60,7 @@ def test_patch_deployment(self):
client = OsConfigServiceClient()
patch_deployment = patch_deployments.PatchDeployment(
- instance_filter=patch_jobs.PatchInstanceFilter(all=True),
+ instance_filter=patch_jobs.PatchInstanceFilter(all_=True),
one_time_schedule=patch_deployments.OneTimeSchedule(
execute_time=timestamp.Timestamp(seconds=200000000000)
),
diff --git a/tests/unit/gapic/osconfig_v1/__init__.py b/tests/unit/gapic/osconfig_v1/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tests/unit/gapic/osconfig_v1/__init__.py
@@ -0,0 +1 @@
+
diff --git a/tests/unit/gapic/osconfig_v1/test_os_config_service.py b/tests/unit/gapic/osconfig_v1/test_os_config_service.py
index 823f1ab..cbd0460 100644
--- a/tests/unit/gapic/osconfig_v1/test_os_config_service.py
+++ b/tests/unit/gapic/osconfig_v1/test_os_config_service.py
@@ -22,9 +22,12 @@
from grpc.experimental import aio
import math
import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
from google import auth
from google.api_core import client_options
+from google.api_core import exceptions
+from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.auth import credentials
@@ -35,9 +38,9 @@
from google.cloud.osconfig_v1.services.os_config_service import OsConfigServiceClient
from google.cloud.osconfig_v1.services.os_config_service import pagers
from google.cloud.osconfig_v1.services.os_config_service import transports
+from google.cloud.osconfig_v1.types import osconfig_common
from google.cloud.osconfig_v1.types import patch_deployments
from google.cloud.osconfig_v1.types import patch_jobs
-from google.cloud.osconfig_v1.types import patch_jobs as gco_patch_jobs
from google.oauth2 import service_account
from google.protobuf import duration_pb2 as duration # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
@@ -50,6 +53,17 @@ def client_cert_source_callback():
return b"cert bytes", b"key bytes"
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
@@ -89,12 +103,12 @@ def test_os_config_service_client_from_service_account_file(client_class):
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
client = client_class.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
+ assert client.transport._credentials == creds
- assert client._transport._host == "osconfig.googleapis.com:443"
+ assert client.transport._host == "osconfig.googleapis.com:443"
def test_os_config_service_client_get_transport_class():
@@ -116,6 +130,16 @@ def test_os_config_service_client_get_transport_class():
),
],
)
+@mock.patch.object(
+ OsConfigServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(OsConfigServiceClient),
+)
+@mock.patch.object(
+ OsConfigServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(OsConfigServiceAsyncClient),
+)
def test_os_config_service_client_client_options(
client_class, transport_class, transport_name
):
@@ -136,96 +160,282 @@ def test_os_config_service_client_client_options(
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
- api_mtls_endpoint="squid.clam.whelk",
- client_cert_source=None,
credentials=None,
+ credentials_file=None,
host="squid.clam.whelk",
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
)
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
- os.environ["GOOGLE_API_USE_MTLS"] = "never"
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError):
+ client = client_class()
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ client = client_class()
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(client_options=options)
patched.assert_called_once_with(
- api_mtls_endpoint=client.DEFAULT_ENDPOINT,
- client_cert_source=None,
credentials=None,
+ credentials_file=None,
host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
)
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS is
- # "always".
- os.environ["GOOGLE_API_USE_MTLS"] = "always"
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ OsConfigServiceClient,
+ transports.OsConfigServiceGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ OsConfigServiceAsyncClient,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ OsConfigServiceClient,
+ transports.OsConfigServiceGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ OsConfigServiceAsyncClient,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ OsConfigServiceClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(OsConfigServiceClient),
+)
+@mock.patch.object(
+ OsConfigServiceAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(OsConfigServiceAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_os_config_service_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ ssl_channel_creds = mock.Mock()
+ with mock.patch(
+ "grpc.ssl_channel_credentials", return_value=ssl_channel_creds
+ ):
+ patched.return_value = None
+ client = client_class(client_options=options)
+
+ if use_client_cert_env == "false":
+ expected_ssl_channel_creds = None
+ expected_host = client.DEFAULT_ENDPOINT
+ else:
+ expected_ssl_channel_creds = ssl_channel_creds
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ ssl_channel_credentials=expected_ssl_channel_creds,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
+ ):
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.is_mtls",
+ new_callable=mock.PropertyMock,
+ ) as is_mtls_mock:
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.ssl_credentials",
+ new_callable=mock.PropertyMock,
+ ) as ssl_credentials_mock:
+ if use_client_cert_env == "false":
+ is_mtls_mock.return_value = False
+ ssl_credentials_mock.return_value = None
+ expected_host = client.DEFAULT_ENDPOINT
+ expected_ssl_channel_creds = None
+ else:
+ is_mtls_mock.return_value = True
+ ssl_credentials_mock.return_value = mock.Mock()
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_ssl_channel_creds = (
+ ssl_credentials_mock.return_value
+ )
+
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ ssl_channel_credentials=expected_ssl_channel_creds,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
+ ):
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.is_mtls",
+ new_callable=mock.PropertyMock,
+ ) as is_mtls_mock:
+ is_mtls_mock.return_value = False
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (OsConfigServiceClient, transports.OsConfigServiceGrpcTransport, "grpc"),
+ (
+ OsConfigServiceAsyncClient,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_os_config_service_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
- client = client_class()
+ client = client_class(client_options=options)
patched.assert_called_once_with(
- api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
- client_cert_source=None,
credentials=None,
- host=client.DEFAULT_MTLS_ENDPOINT,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=["1", "2"],
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
)
- # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
- # "auto", and client_cert_source is provided.
- os.environ["GOOGLE_API_USE_MTLS"] = "auto"
- options = client_options.ClientOptions(
- client_cert_source=client_cert_source_callback
- )
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (OsConfigServiceClient, transports.OsConfigServiceGrpcTransport, "grpc"),
+ (
+ OsConfigServiceAsyncClient,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_os_config_service_client_client_options_credentials_file(
+ client_class, transport_class, transport_name
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
- api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
- client_cert_source=client_cert_source_callback,
credentials=None,
- host=client.DEFAULT_MTLS_ENDPOINT,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
)
- # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
- # "auto", and default_client_cert_source is provided.
- os.environ["GOOGLE_API_USE_MTLS"] = "auto"
- with mock.patch.object(transport_class, "__init__") as patched:
- with mock.patch(
- "google.auth.transport.mtls.has_default_client_cert_source",
- return_value=True,
- ):
- patched.return_value = None
- client = client_class()
- patched.assert_called_once_with(
- api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
- client_cert_source=None,
- credentials=None,
- host=client.DEFAULT_MTLS_ENDPOINT,
- )
-
- # Check the case api_endpoint is not provided, GOOGLE_API_USE_MTLS is
- # "auto", but client_cert_source and default_client_cert_source are None.
- os.environ["GOOGLE_API_USE_MTLS"] = "auto"
- with mock.patch.object(transport_class, "__init__") as patched:
- with mock.patch(
- "google.auth.transport.mtls.has_default_client_cert_source",
- return_value=False,
- ):
- patched.return_value = None
- client = client_class()
- patched.assert_called_once_with(
- api_mtls_endpoint=client.DEFAULT_ENDPOINT,
- client_cert_source=None,
- credentials=None,
- host=client.DEFAULT_ENDPOINT,
- )
-
- # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS has
- # unsupported value.
- os.environ["GOOGLE_API_USE_MTLS"] = "Unsupported"
- with pytest.raises(MutualTLSChannelError):
- client = client_class()
-
- del os.environ["GOOGLE_API_USE_MTLS"]
-
def test_os_config_service_client_client_options_from_dict():
with mock.patch(
@@ -236,25 +446,30 @@ def test_os_config_service_client_client_options_from_dict():
client_options={"api_endpoint": "squid.clam.whelk"}
)
grpc_transport.assert_called_once_with(
- api_mtls_endpoint="squid.clam.whelk",
- client_cert_source=None,
credentials=None,
+ credentials_file=None,
host="squid.clam.whelk",
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
)
-def test_execute_patch_job(transport: str = "grpc"):
+def test_execute_patch_job(
+ transport: str = "grpc", request_type=patch_jobs.ExecutePatchJobRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.ExecutePatchJobRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.execute_patch_job), "__call__"
+ type(client.transport.execute_patch_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.PatchJob(
@@ -274,34 +489,48 @@ def test_execute_patch_job(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.ExecutePatchJobRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, patch_jobs.PatchJob)
+
assert response.name == "name_value"
+
assert response.display_name == "display_name_value"
+
assert response.description == "description_value"
+
assert response.state == patch_jobs.PatchJob.State.STARTED
assert response.dry_run is True
+
assert response.error_message == "error_message_value"
+
assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
+
assert response.patch_deployment == "patch_deployment_value"
+def test_execute_patch_job_from_dict():
+ test_execute_patch_job(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_execute_patch_job_async(transport: str = "grpc_asyncio"):
+async def test_execute_patch_job_async(
+ transport: str = "grpc_asyncio", request_type=patch_jobs.ExecutePatchJobRequest
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.ExecutePatchJobRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.execute_patch_job), "__call__"
+ type(client.transport.execute_patch_job), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
@@ -323,23 +552,35 @@ async def test_execute_patch_job_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.ExecutePatchJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, patch_jobs.PatchJob)
+
assert response.name == "name_value"
+
assert response.display_name == "display_name_value"
+
assert response.description == "description_value"
+
assert response.state == patch_jobs.PatchJob.State.STARTED
assert response.dry_run is True
+
assert response.error_message == "error_message_value"
+
assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
+
assert response.patch_deployment == "patch_deployment_value"
+@pytest.mark.asyncio
+async def test_execute_patch_job_async_from_dict():
+ await test_execute_patch_job_async(request_type=dict)
+
+
def test_execute_patch_job_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -348,7 +589,7 @@ def test_execute_patch_job_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.execute_patch_job), "__call__"
+ type(client.transport.execute_patch_job), "__call__"
) as call:
call.return_value = patch_jobs.PatchJob()
@@ -361,12 +602,12 @@ def test_execute_patch_job_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_execute_patch_job_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -375,7 +616,7 @@ async def test_execute_patch_job_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.execute_patch_job), "__call__"
+ type(client.transport.execute_patch_job), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
@@ -388,20 +629,22 @@ async def test_execute_patch_job_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
-def test_get_patch_job(transport: str = "grpc"):
+def test_get_patch_job(
+ transport: str = "grpc", request_type=patch_jobs.GetPatchJobRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.GetPatchJobRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_patch_job), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_patch_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.PatchJob(
name="name_value",
@@ -420,35 +663,47 @@ def test_get_patch_job(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.GetPatchJobRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, patch_jobs.PatchJob)
+
assert response.name == "name_value"
+
assert response.display_name == "display_name_value"
+
assert response.description == "description_value"
+
assert response.state == patch_jobs.PatchJob.State.STARTED
assert response.dry_run is True
+
assert response.error_message == "error_message_value"
+
assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
+
assert response.patch_deployment == "patch_deployment_value"
+def test_get_patch_job_from_dict():
+ test_get_patch_job(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_get_patch_job_async(transport: str = "grpc_asyncio"):
+async def test_get_patch_job_async(
+ transport: str = "grpc_asyncio", request_type=patch_jobs.GetPatchJobRequest
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.GetPatchJobRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_patch_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_jobs.PatchJob(
@@ -469,23 +724,35 @@ async def test_get_patch_job_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.GetPatchJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, patch_jobs.PatchJob)
+
assert response.name == "name_value"
+
assert response.display_name == "display_name_value"
+
assert response.description == "description_value"
+
assert response.state == patch_jobs.PatchJob.State.STARTED
assert response.dry_run is True
+
assert response.error_message == "error_message_value"
+
assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
+
assert response.patch_deployment == "patch_deployment_value"
+@pytest.mark.asyncio
+async def test_get_patch_job_async_from_dict():
+ await test_get_patch_job_async(request_type=dict)
+
+
def test_get_patch_job_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -493,7 +760,7 @@ def test_get_patch_job_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_patch_job), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_patch_job), "__call__") as call:
call.return_value = patch_jobs.PatchJob()
client.get_patch_job(request)
@@ -505,12 +772,12 @@ def test_get_patch_job_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_patch_job_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -518,9 +785,7 @@ async def test_get_patch_job_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_patch_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
await client.get_patch_job(request)
@@ -532,83 +797,87 @@ async def test_get_patch_job_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_patch_job_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_patch_job), "__call__") as call:
+ with mock.patch.object(type(client.transport.get_patch_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.PatchJob()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- client.get_patch_job(name="name_value")
+ client.get_patch_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].name == "name_value"
def test_get_patch_job_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.get_patch_job(patch_jobs.GetPatchJobRequest(), name="name_value")
+ client.get_patch_job(
+ patch_jobs.GetPatchJobRequest(), name="name_value",
+ )
@pytest.mark.asyncio
async def test_get_patch_job_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.get_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.get_patch_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.PatchJob()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- response = await client.get_patch_job(name="name_value")
+ response = await client.get_patch_job(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_patch_job_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- await client.get_patch_job(patch_jobs.GetPatchJobRequest(), name="name_value")
+ await client.get_patch_job(
+ patch_jobs.GetPatchJobRequest(), name="name_value",
+ )
-def test_cancel_patch_job(transport: str = "grpc"):
+def test_cancel_patch_job(
+ transport: str = "grpc", request_type=patch_jobs.CancelPatchJobRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.CancelPatchJobRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_patch_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.PatchJob(
name="name_value",
@@ -627,35 +896,47 @@ def test_cancel_patch_job(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.CancelPatchJobRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, patch_jobs.PatchJob)
+
assert response.name == "name_value"
+
assert response.display_name == "display_name_value"
+
assert response.description == "description_value"
+
assert response.state == patch_jobs.PatchJob.State.STARTED
assert response.dry_run is True
+
assert response.error_message == "error_message_value"
+
assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
+
assert response.patch_deployment == "patch_deployment_value"
+def test_cancel_patch_job_from_dict():
+ test_cancel_patch_job(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_cancel_patch_job_async(transport: str = "grpc_asyncio"):
+async def test_cancel_patch_job_async(
+ transport: str = "grpc_asyncio", request_type=patch_jobs.CancelPatchJobRequest
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.CancelPatchJobRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_patch_job), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_jobs.PatchJob(
@@ -676,23 +957,35 @@ async def test_cancel_patch_job_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.CancelPatchJobRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, patch_jobs.PatchJob)
+
assert response.name == "name_value"
+
assert response.display_name == "display_name_value"
+
assert response.description == "description_value"
+
assert response.state == patch_jobs.PatchJob.State.STARTED
assert response.dry_run is True
+
assert response.error_message == "error_message_value"
+
assert math.isclose(response.percent_complete, 0.1705, rel_tol=1e-6)
+
assert response.patch_deployment == "patch_deployment_value"
+@pytest.mark.asyncio
+async def test_cancel_patch_job_async_from_dict():
+ await test_cancel_patch_job_async(request_type=dict)
+
+
def test_cancel_patch_job_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -700,9 +993,7 @@ def test_cancel_patch_job_field_headers():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.cancel_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_patch_job), "__call__") as call:
call.return_value = patch_jobs.PatchJob()
client.cancel_patch_job(request)
@@ -714,12 +1005,12 @@ def test_cancel_patch_job_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_cancel_patch_job_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -727,9 +1018,7 @@ async def test_cancel_patch_job_field_headers_async():
request.name = "name/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.cancel_patch_job), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.cancel_patch_job), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(patch_jobs.PatchJob())
await client.cancel_patch_job(request)
@@ -741,23 +1030,25 @@ async def test_cancel_patch_job_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
-def test_list_patch_jobs(transport: str = "grpc"):
+def test_list_patch_jobs(
+ transport: str = "grpc", request_type=patch_jobs.ListPatchJobsRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.ListPatchJobsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.ListPatchJobsResponse(
- next_page_token="next_page_token_value"
+ next_page_token="next_page_token_value",
)
response = client.list_patch_jobs(request)
@@ -766,30 +1057,36 @@ def test_list_patch_jobs(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.ListPatchJobsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListPatchJobsPager)
+
assert response.next_page_token == "next_page_token_value"
+def test_list_patch_jobs_from_dict():
+ test_list_patch_jobs(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_list_patch_jobs_async(transport: str = "grpc_asyncio"):
+async def test_list_patch_jobs_async(
+ transport: str = "grpc_asyncio", request_type=patch_jobs.ListPatchJobsRequest
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.ListPatchJobsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_patch_jobs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
- patch_jobs.ListPatchJobsResponse(next_page_token="next_page_token_value")
+ patch_jobs.ListPatchJobsResponse(next_page_token="next_page_token_value",)
)
response = await client.list_patch_jobs(request)
@@ -798,15 +1095,21 @@ async def test_list_patch_jobs_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.ListPatchJobsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPatchJobsAsyncPager)
+
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_patch_jobs_async_from_dict():
+ await test_list_patch_jobs_async(request_type=dict)
+
+
def test_list_patch_jobs_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -814,7 +1117,7 @@ def test_list_patch_jobs_field_headers():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
call.return_value = patch_jobs.ListPatchJobsResponse()
client.list_patch_jobs(request)
@@ -826,12 +1129,12 @@ def test_list_patch_jobs_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_patch_jobs_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -839,9 +1142,7 @@ async def test_list_patch_jobs_field_headers_async():
request.parent = "parent/value"
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_patch_jobs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_jobs.ListPatchJobsResponse()
)
@@ -855,45 +1156,46 @@ async def test_list_patch_jobs_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_patch_jobs_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.ListPatchJobsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- client.list_patch_jobs(parent="parent_value")
+ client.list_patch_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
def test_list_patch_jobs_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
- client.list_patch_jobs(patch_jobs.ListPatchJobsRequest(), parent="parent_value")
+ client.list_patch_jobs(
+ patch_jobs.ListPatchJobsRequest(), parent="parent_value",
+ )
@pytest.mark.asyncio
async def test_list_patch_jobs_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._client._transport.list_patch_jobs), "__call__"
- ) as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.ListPatchJobsResponse()
@@ -902,32 +1204,33 @@ async def test_list_patch_jobs_flattened_async():
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- response = await client.list_patch_jobs(parent="parent_value")
+ response = await client.list_patch_jobs(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_patch_jobs_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_patch_jobs(
- patch_jobs.ListPatchJobsRequest(), parent="parent_value"
+ patch_jobs.ListPatchJobsRequest(), parent="parent_value",
)
def test_list_patch_jobs_pager():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
patch_jobs.ListPatchJobsResponse(
@@ -938,25 +1241,34 @@ def test_list_patch_jobs_pager():
],
next_page_token="abc",
),
- patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
+ patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def",),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
+ patch_jobs=[patch_jobs.PatchJob(),], next_page_token="ghi",
),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
+ patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob(),],
),
RuntimeError,
)
- results = [i for i in client.list_patch_jobs(request={})]
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_patch_jobs(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, patch_jobs.PatchJob) for i in results)
def test_list_patch_jobs_pages():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_patch_jobs), "__call__") as call:
+ with mock.patch.object(type(client.transport.list_patch_jobs), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
patch_jobs.ListPatchJobsResponse(
@@ -967,29 +1279,27 @@ def test_list_patch_jobs_pages():
],
next_page_token="abc",
),
- patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
+ patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def",),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
+ patch_jobs=[patch_jobs.PatchJob(),], next_page_token="ghi",
),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
+ patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob(),],
),
RuntimeError,
)
pages = list(client.list_patch_jobs(request={}).pages)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_patch_jobs_async_pager():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_jobs),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_patch_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1001,16 +1311,16 @@ async def test_list_patch_jobs_async_pager():
],
next_page_token="abc",
),
- patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
+ patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def",),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
+ patch_jobs=[patch_jobs.PatchJob(),], next_page_token="ghi",
),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
+ patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob(),],
),
RuntimeError,
)
- async_pager = await client.list_patch_jobs(request={})
+ async_pager = await client.list_patch_jobs(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
@@ -1022,13 +1332,11 @@ async def test_list_patch_jobs_async_pager():
@pytest.mark.asyncio
async def test_list_patch_jobs_async_pages():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_jobs),
- "__call__",
- new_callable=mock.AsyncMock,
+ type(client.transport.list_patch_jobs), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1040,38 +1348,40 @@ async def test_list_patch_jobs_async_pages():
],
next_page_token="abc",
),
- patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def"),
+ patch_jobs.ListPatchJobsResponse(patch_jobs=[], next_page_token="def",),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob()], next_page_token="ghi"
+ patch_jobs=[patch_jobs.PatchJob(),], next_page_token="ghi",
),
patch_jobs.ListPatchJobsResponse(
- patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob()]
+ patch_jobs=[patch_jobs.PatchJob(), patch_jobs.PatchJob(),],
),
RuntimeError,
)
pages = []
- async for page in (await client.list_patch_jobs(request={})).pages:
- pages.append(page)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
+ async for page_ in (await client.list_patch_jobs(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
-def test_list_patch_job_instance_details(transport: str = "grpc"):
+def test_list_patch_job_instance_details(
+ transport: str = "grpc", request_type=patch_jobs.ListPatchJobInstanceDetailsRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.ListPatchJobInstanceDetailsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse(
- next_page_token="next_page_token_value"
+ next_page_token="next_page_token_value",
)
response = client.list_patch_job_instance_details(request)
@@ -1080,31 +1390,40 @@ def test_list_patch_job_instance_details(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.ListPatchJobInstanceDetailsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListPatchJobInstanceDetailsPager)
+
assert response.next_page_token == "next_page_token_value"
+def test_list_patch_job_instance_details_from_dict():
+ test_list_patch_job_instance_details(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_list_patch_job_instance_details_async(transport: str = "grpc_asyncio"):
+async def test_list_patch_job_instance_details_async(
+ transport: str = "grpc_asyncio",
+ request_type=patch_jobs.ListPatchJobInstanceDetailsRequest,
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_jobs.ListPatchJobInstanceDetailsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_jobs.ListPatchJobInstanceDetailsResponse(
- next_page_token="next_page_token_value"
+ next_page_token="next_page_token_value",
)
)
@@ -1114,15 +1433,21 @@ async def test_list_patch_job_instance_details_async(transport: str = "grpc_asyn
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_jobs.ListPatchJobInstanceDetailsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPatchJobInstanceDetailsAsyncPager)
+
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_patch_job_instance_details_async_from_dict():
+ await test_list_patch_job_instance_details_async(request_type=dict)
+
+
def test_list_patch_job_instance_details_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1131,7 +1456,7 @@ def test_list_patch_job_instance_details_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
call.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse()
@@ -1144,12 +1469,12 @@ def test_list_patch_job_instance_details_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1158,7 +1483,7 @@ async def test_list_patch_job_instance_details_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_jobs.ListPatchJobInstanceDetailsResponse()
@@ -1173,48 +1498,49 @@ async def test_list_patch_job_instance_details_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_patch_job_instance_details_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- client.list_patch_job_instance_details(parent="parent_value")
+ client.list_patch_job_instance_details(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
def test_list_patch_job_instance_details_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_patch_job_instance_details(
- patch_jobs.ListPatchJobInstanceDetailsRequest(), parent="parent_value"
+ patch_jobs.ListPatchJobInstanceDetailsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_jobs.ListPatchJobInstanceDetailsResponse()
@@ -1224,33 +1550,34 @@ async def test_list_patch_job_instance_details_flattened_async():
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- response = await client.list_patch_job_instance_details(parent="parent_value")
+ response = await client.list_patch_job_instance_details(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_patch_job_instance_details(
- patch_jobs.ListPatchJobInstanceDetailsRequest(), parent="parent_value"
+ patch_jobs.ListPatchJobInstanceDetailsRequest(), parent="parent_value",
)
def test_list_patch_job_instance_details_pager():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1263,31 +1590,40 @@ def test_list_patch_job_instance_details_pager():
next_page_token="abc",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[], next_page_token="def"
+ patch_job_instance_details=[], next_page_token="def",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
+ patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails(),],
next_page_token="ghi",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
patch_job_instance_details=[
patch_jobs.PatchJobInstanceDetails(),
patch_jobs.PatchJobInstanceDetails(),
- ]
+ ],
),
RuntimeError,
)
- results = [i for i in client.list_patch_job_instance_details(request={})]
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_patch_job_instance_details(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, patch_jobs.PatchJobInstanceDetails) for i in results)
def test_list_patch_job_instance_details_pages():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_job_instance_details), "__call__"
+ type(client.transport.list_patch_job_instance_details), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -1300,32 +1636,32 @@ def test_list_patch_job_instance_details_pages():
next_page_token="abc",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[], next_page_token="def"
+ patch_job_instance_details=[], next_page_token="def",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
+ patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails(),],
next_page_token="ghi",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
patch_job_instance_details=[
patch_jobs.PatchJobInstanceDetails(),
patch_jobs.PatchJobInstanceDetails(),
- ]
+ ],
),
RuntimeError,
)
pages = list(client.list_patch_job_instance_details(request={}).pages)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_async_pager():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_job_instance_details),
+ type(client.transport.list_patch_job_instance_details),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -1340,21 +1676,21 @@ async def test_list_patch_job_instance_details_async_pager():
next_page_token="abc",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[], next_page_token="def"
+ patch_job_instance_details=[], next_page_token="def",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
+ patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails(),],
next_page_token="ghi",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
patch_job_instance_details=[
patch_jobs.PatchJobInstanceDetails(),
patch_jobs.PatchJobInstanceDetails(),
- ]
+ ],
),
RuntimeError,
)
- async_pager = await client.list_patch_job_instance_details(request={})
+ async_pager = await client.list_patch_job_instance_details(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
@@ -1366,11 +1702,11 @@ async def test_list_patch_job_instance_details_async_pager():
@pytest.mark.asyncio
async def test_list_patch_job_instance_details_async_pages():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_job_instance_details),
+ type(client.transport.list_patch_job_instance_details),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -1385,45 +1721,51 @@ async def test_list_patch_job_instance_details_async_pages():
next_page_token="abc",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[], next_page_token="def"
+ patch_job_instance_details=[], next_page_token="def",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
- patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails()],
+ patch_job_instance_details=[patch_jobs.PatchJobInstanceDetails(),],
next_page_token="ghi",
),
patch_jobs.ListPatchJobInstanceDetailsResponse(
patch_job_instance_details=[
patch_jobs.PatchJobInstanceDetails(),
patch_jobs.PatchJobInstanceDetails(),
- ]
+ ],
),
RuntimeError,
)
pages = []
- async for page in (
+ async for page_ in (
await client.list_patch_job_instance_details(request={})
).pages:
- pages.append(page)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
-def test_create_patch_deployment(transport: str = "grpc"):
+def test_create_patch_deployment(
+ transport: str = "grpc", request_type=patch_deployments.CreatePatchDeploymentRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.CreatePatchDeploymentRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.create_patch_deployment), "__call__"
+ type(client.transport.create_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.PatchDeployment(
- name="name_value", description="description_value"
+ name="name_value",
+ description="description_value",
+ one_time_schedule=patch_deployments.OneTimeSchedule(
+ execute_time=timestamp.Timestamp(seconds=751)
+ ),
)
response = client.create_patch_deployment(request)
@@ -1432,32 +1774,42 @@ def test_create_patch_deployment(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.CreatePatchDeploymentRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, patch_deployments.PatchDeployment)
+
assert response.name == "name_value"
+
assert response.description == "description_value"
+def test_create_patch_deployment_from_dict():
+ test_create_patch_deployment(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_create_patch_deployment_async(transport: str = "grpc_asyncio"):
+async def test_create_patch_deployment_async(
+ transport: str = "grpc_asyncio",
+ request_type=patch_deployments.CreatePatchDeploymentRequest,
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.CreatePatchDeploymentRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.create_patch_deployment), "__call__"
+ type(client.transport.create_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_deployments.PatchDeployment(
- name="name_value", description="description_value"
+ name="name_value", description="description_value",
)
)
@@ -1467,16 +1819,23 @@ async def test_create_patch_deployment_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.CreatePatchDeploymentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, patch_deployments.PatchDeployment)
+
assert response.name == "name_value"
+
assert response.description == "description_value"
+@pytest.mark.asyncio
+async def test_create_patch_deployment_async_from_dict():
+ await test_create_patch_deployment_async(request_type=dict)
+
+
def test_create_patch_deployment_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1485,7 +1844,7 @@ def test_create_patch_deployment_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.create_patch_deployment), "__call__"
+ type(client.transport.create_patch_deployment), "__call__"
) as call:
call.return_value = patch_deployments.PatchDeployment()
@@ -1498,12 +1857,12 @@ def test_create_patch_deployment_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_patch_deployment_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1512,7 +1871,7 @@ async def test_create_patch_deployment_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.create_patch_deployment), "__call__"
+ type(client.transport.create_patch_deployment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_deployments.PatchDeployment()
@@ -1527,15 +1886,15 @@ async def test_create_patch_deployment_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_create_patch_deployment_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.create_patch_deployment), "__call__"
+ type(client.transport.create_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.PatchDeployment()
@@ -1552,15 +1911,18 @@ def test_create_patch_deployment_flattened():
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
+
assert args[0].patch_deployment == patch_deployments.PatchDeployment(
name="name_value"
)
+
assert args[0].patch_deployment_id == "patch_deployment_id_value"
def test_create_patch_deployment_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1575,11 +1937,11 @@ def test_create_patch_deployment_flattened_error():
@pytest.mark.asyncio
async def test_create_patch_deployment_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.create_patch_deployment), "__call__"
+ type(client.transport.create_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.PatchDeployment()
@@ -1599,16 +1961,19 @@ async def test_create_patch_deployment_flattened_async():
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
+
assert args[0].patch_deployment == patch_deployments.PatchDeployment(
name="name_value"
)
+
assert args[0].patch_deployment_id == "patch_deployment_id_value"
@pytest.mark.asyncio
async def test_create_patch_deployment_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
@@ -1621,22 +1986,28 @@ async def test_create_patch_deployment_flattened_error_async():
)
-def test_get_patch_deployment(transport: str = "grpc"):
+def test_get_patch_deployment(
+ transport: str = "grpc", request_type=patch_deployments.GetPatchDeploymentRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.GetPatchDeploymentRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_patch_deployment), "__call__"
+ type(client.transport.get_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.PatchDeployment(
- name="name_value", description="description_value"
+ name="name_value",
+ description="description_value",
+ one_time_schedule=patch_deployments.OneTimeSchedule(
+ execute_time=timestamp.Timestamp(seconds=751)
+ ),
)
response = client.get_patch_deployment(request)
@@ -1645,32 +2016,42 @@ def test_get_patch_deployment(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.GetPatchDeploymentRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, patch_deployments.PatchDeployment)
+
assert response.name == "name_value"
+
assert response.description == "description_value"
+def test_get_patch_deployment_from_dict():
+ test_get_patch_deployment(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_get_patch_deployment_async(transport: str = "grpc_asyncio"):
+async def test_get_patch_deployment_async(
+ transport: str = "grpc_asyncio",
+ request_type=patch_deployments.GetPatchDeploymentRequest,
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.GetPatchDeploymentRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_patch_deployment), "__call__"
+ type(client.transport.get_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_deployments.PatchDeployment(
- name="name_value", description="description_value"
+ name="name_value", description="description_value",
)
)
@@ -1680,16 +2061,23 @@ async def test_get_patch_deployment_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.GetPatchDeploymentRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, patch_deployments.PatchDeployment)
+
assert response.name == "name_value"
+
assert response.description == "description_value"
+@pytest.mark.asyncio
+async def test_get_patch_deployment_async_from_dict():
+ await test_get_patch_deployment_async(request_type=dict)
+
+
def test_get_patch_deployment_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1698,7 +2086,7 @@ def test_get_patch_deployment_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_patch_deployment), "__call__"
+ type(client.transport.get_patch_deployment), "__call__"
) as call:
call.return_value = patch_deployments.PatchDeployment()
@@ -1711,12 +2099,12 @@ def test_get_patch_deployment_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_patch_deployment_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1725,7 +2113,7 @@ async def test_get_patch_deployment_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_patch_deployment), "__call__"
+ type(client.transport.get_patch_deployment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_deployments.PatchDeployment()
@@ -1740,48 +2128,49 @@ async def test_get_patch_deployment_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_get_patch_deployment_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.get_patch_deployment), "__call__"
+ type(client.transport.get_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.PatchDeployment()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- client.get_patch_deployment(name="name_value")
+ client.get_patch_deployment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].name == "name_value"
def test_get_patch_deployment_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_patch_deployment(
- patch_deployments.GetPatchDeploymentRequest(), name="name_value"
+ patch_deployments.GetPatchDeploymentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_get_patch_deployment_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.get_patch_deployment), "__call__"
+ type(client.transport.get_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.PatchDeployment()
@@ -1791,43 +2180,46 @@ async def test_get_patch_deployment_flattened_async():
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- response = await client.get_patch_deployment(name="name_value")
+ response = await client.get_patch_deployment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_get_patch_deployment_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.get_patch_deployment(
- patch_deployments.GetPatchDeploymentRequest(), name="name_value"
+ patch_deployments.GetPatchDeploymentRequest(), name="name_value",
)
-def test_list_patch_deployments(transport: str = "grpc"):
+def test_list_patch_deployments(
+ transport: str = "grpc", request_type=patch_deployments.ListPatchDeploymentsRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.ListPatchDeploymentsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.ListPatchDeploymentsResponse(
- next_page_token="next_page_token_value"
+ next_page_token="next_page_token_value",
)
response = client.list_patch_deployments(request)
@@ -1836,31 +2228,40 @@ def test_list_patch_deployments(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.ListPatchDeploymentsRequest()
# Establish that the response is the type that we expect.
+
assert isinstance(response, pagers.ListPatchDeploymentsPager)
+
assert response.next_page_token == "next_page_token_value"
+def test_list_patch_deployments_from_dict():
+ test_list_patch_deployments(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_list_patch_deployments_async(transport: str = "grpc_asyncio"):
+async def test_list_patch_deployments_async(
+ transport: str = "grpc_asyncio",
+ request_type=patch_deployments.ListPatchDeploymentsRequest,
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.ListPatchDeploymentsRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_deployments.ListPatchDeploymentsResponse(
- next_page_token="next_page_token_value"
+ next_page_token="next_page_token_value",
)
)
@@ -1870,15 +2271,21 @@ async def test_list_patch_deployments_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.ListPatchDeploymentsRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPatchDeploymentsAsyncPager)
+
assert response.next_page_token == "next_page_token_value"
+@pytest.mark.asyncio
+async def test_list_patch_deployments_async_from_dict():
+ await test_list_patch_deployments_async(request_type=dict)
+
+
def test_list_patch_deployments_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1887,7 +2294,7 @@ def test_list_patch_deployments_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
call.return_value = patch_deployments.ListPatchDeploymentsResponse()
@@ -1900,12 +2307,12 @@ def test_list_patch_deployments_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_patch_deployments_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -1914,7 +2321,7 @@ async def test_list_patch_deployments_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
patch_deployments.ListPatchDeploymentsResponse()
@@ -1929,48 +2336,49 @@ async def test_list_patch_deployments_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
def test_list_patch_deployments_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.ListPatchDeploymentsResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- client.list_patch_deployments(parent="parent_value")
+ client.list_patch_deployments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
def test_list_patch_deployments_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list_patch_deployments(
- patch_deployments.ListPatchDeploymentsRequest(), parent="parent_value"
+ patch_deployments.ListPatchDeploymentsRequest(), parent="parent_value",
)
@pytest.mark.asyncio
async def test_list_patch_deployments_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = patch_deployments.ListPatchDeploymentsResponse()
@@ -1980,33 +2388,34 @@ async def test_list_patch_deployments_flattened_async():
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- response = await client.list_patch_deployments(parent="parent_value")
+ response = await client.list_patch_deployments(parent="parent_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].parent == "parent_value"
@pytest.mark.asyncio
async def test_list_patch_deployments_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.list_patch_deployments(
- patch_deployments.ListPatchDeploymentsRequest(), parent="parent_value"
+ patch_deployments.ListPatchDeploymentsRequest(), parent="parent_value",
)
def test_list_patch_deployments_pager():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -2019,31 +2428,40 @@ def test_list_patch_deployments_pager():
next_page_token="abc",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[], next_page_token="def"
+ patch_deployments=[], next_page_token="def",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[patch_deployments.PatchDeployment()],
+ patch_deployments=[patch_deployments.PatchDeployment(),],
next_page_token="ghi",
),
patch_deployments.ListPatchDeploymentsResponse(
patch_deployments=[
patch_deployments.PatchDeployment(),
patch_deployments.PatchDeployment(),
- ]
+ ],
),
RuntimeError,
)
- results = [i for i in client.list_patch_deployments(request={})]
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_patch_deployments(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
assert len(results) == 6
assert all(isinstance(i, patch_deployments.PatchDeployment) for i in results)
def test_list_patch_deployments_pages():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.list_patch_deployments), "__call__"
+ type(client.transport.list_patch_deployments), "__call__"
) as call:
# Set the response to a series of pages.
call.side_effect = (
@@ -2056,32 +2474,32 @@ def test_list_patch_deployments_pages():
next_page_token="abc",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[], next_page_token="def"
+ patch_deployments=[], next_page_token="def",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[patch_deployments.PatchDeployment()],
+ patch_deployments=[patch_deployments.PatchDeployment(),],
next_page_token="ghi",
),
patch_deployments.ListPatchDeploymentsResponse(
patch_deployments=[
patch_deployments.PatchDeployment(),
patch_deployments.PatchDeployment(),
- ]
+ ],
),
RuntimeError,
)
pages = list(client.list_patch_deployments(request={}).pages)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_patch_deployments_async_pager():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_deployments),
+ type(client.transport.list_patch_deployments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -2096,21 +2514,21 @@ async def test_list_patch_deployments_async_pager():
next_page_token="abc",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[], next_page_token="def"
+ patch_deployments=[], next_page_token="def",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[patch_deployments.PatchDeployment()],
+ patch_deployments=[patch_deployments.PatchDeployment(),],
next_page_token="ghi",
),
patch_deployments.ListPatchDeploymentsResponse(
patch_deployments=[
patch_deployments.PatchDeployment(),
patch_deployments.PatchDeployment(),
- ]
+ ],
),
RuntimeError,
)
- async_pager = await client.list_patch_deployments(request={})
+ async_pager = await client.list_patch_deployments(request={},)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager:
@@ -2122,11 +2540,11 @@ async def test_list_patch_deployments_async_pager():
@pytest.mark.asyncio
async def test_list_patch_deployments_async_pages():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials)
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials,)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.list_patch_deployments),
+ type(client.transport.list_patch_deployments),
"__call__",
new_callable=mock.AsyncMock,
) as call:
@@ -2141,39 +2559,41 @@ async def test_list_patch_deployments_async_pages():
next_page_token="abc",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[], next_page_token="def"
+ patch_deployments=[], next_page_token="def",
),
patch_deployments.ListPatchDeploymentsResponse(
- patch_deployments=[patch_deployments.PatchDeployment()],
+ patch_deployments=[patch_deployments.PatchDeployment(),],
next_page_token="ghi",
),
patch_deployments.ListPatchDeploymentsResponse(
patch_deployments=[
patch_deployments.PatchDeployment(),
patch_deployments.PatchDeployment(),
- ]
+ ],
),
RuntimeError,
)
pages = []
- async for page in (await client.list_patch_deployments(request={})).pages:
- pages.append(page)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
+ async for page_ in (await client.list_patch_deployments(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
-def test_delete_patch_deployment(transport: str = "grpc"):
+def test_delete_patch_deployment(
+ transport: str = "grpc", request_type=patch_deployments.DeletePatchDeploymentRequest
+):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.DeletePatchDeploymentRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.delete_patch_deployment), "__call__"
+ type(client.transport.delete_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -2184,25 +2604,32 @@ def test_delete_patch_deployment(transport: str = "grpc"):
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.DeletePatchDeploymentRequest()
# Establish that the response is the type that we expect.
assert response is None
+def test_delete_patch_deployment_from_dict():
+ test_delete_patch_deployment(request_type=dict)
+
+
@pytest.mark.asyncio
-async def test_delete_patch_deployment_async(transport: str = "grpc_asyncio"):
+async def test_delete_patch_deployment_async(
+ transport: str = "grpc_asyncio",
+ request_type=patch_deployments.DeletePatchDeploymentRequest,
+):
client = OsConfigServiceAsyncClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
- request = patch_deployments.DeletePatchDeploymentRequest()
+ request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.delete_patch_deployment), "__call__"
+ type(client.transport.delete_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
@@ -2213,14 +2640,19 @@ async def test_delete_patch_deployment_async(transport: str = "grpc_asyncio"):
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
- assert args[0] == request
+ assert args[0] == patch_deployments.DeletePatchDeploymentRequest()
# Establish that the response is the type that we expect.
assert response is None
+@pytest.mark.asyncio
+async def test_delete_patch_deployment_async_from_dict():
+ await test_delete_patch_deployment_async(request_type=dict)
+
+
def test_delete_patch_deployment_field_headers():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -2229,7 +2661,7 @@ def test_delete_patch_deployment_field_headers():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.delete_patch_deployment), "__call__"
+ type(client.transport.delete_patch_deployment), "__call__"
) as call:
call.return_value = None
@@ -2242,12 +2674,12 @@ def test_delete_patch_deployment_field_headers():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_patch_deployment_field_headers_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
@@ -2256,7 +2688,7 @@ async def test_delete_patch_deployment_field_headers_async():
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.delete_patch_deployment), "__call__"
+ type(client.transport.delete_patch_deployment), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
@@ -2269,48 +2701,49 @@ async def test_delete_patch_deployment_field_headers_async():
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
def test_delete_patch_deployment_flattened():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._transport.delete_patch_deployment), "__call__"
+ type(client.transport.delete_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- client.delete_patch_deployment(name="name_value")
+ client.delete_patch_deployment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
+
assert args[0].name == "name_value"
def test_delete_patch_deployment_flattened_error():
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_patch_deployment(
- patch_deployments.DeletePatchDeploymentRequest(), name="name_value"
+ patch_deployments.DeletePatchDeploymentRequest(), name="name_value",
)
@pytest.mark.asyncio
async def test_delete_patch_deployment_flattened_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
- type(client._client._transport.delete_patch_deployment), "__call__"
+ type(client.transport.delete_patch_deployment), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = None
@@ -2318,73 +2751,121 @@ async def test_delete_patch_deployment_flattened_async():
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
- response = await client.delete_patch_deployment(name="name_value")
+ response = await client.delete_patch_deployment(name="name_value",)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
+
assert args[0].name == "name_value"
@pytest.mark.asyncio
async def test_delete_patch_deployment_flattened_error_async():
- client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials())
+ client = OsConfigServiceAsyncClient(credentials=credentials.AnonymousCredentials(),)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.delete_patch_deployment(
- patch_deployments.DeletePatchDeploymentRequest(), name="name_value"
+ patch_deployments.DeletePatchDeploymentRequest(), name="name_value",
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.OsConfigServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials()
+ credentials=credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = OsConfigServiceClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.OsConfigServiceGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = OsConfigServiceClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.OsConfigServiceGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = OsConfigServiceClient(
+ client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.OsConfigServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials()
+ credentials=credentials.AnonymousCredentials(),
)
client = OsConfigServiceClient(transport=transport)
- assert client._transport is transport
+ assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.OsConfigServiceGrpcTransport(
- credentials=credentials.AnonymousCredentials()
+ credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
transport = transports.OsConfigServiceGrpcAsyncIOTransport(
- credentials=credentials.AnonymousCredentials()
+ credentials=credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.OsConfigServiceGrpcTransport,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(auth, "default") as adc:
+ adc.return_value = (credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
- client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials())
- assert isinstance(client._transport, transports.OsConfigServiceGrpcTransport)
+ client = OsConfigServiceClient(credentials=credentials.AnonymousCredentials(),)
+ assert isinstance(client.transport, transports.OsConfigServiceGrpcTransport,)
+
+
+def test_os_config_service_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(exceptions.DuplicateCredentialArgs):
+ transport = transports.OsConfigServiceTransport(
+ credentials=credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
def test_os_config_service_base_transport():
# Instantiate the base transport.
- transport = transports.OsConfigServiceTransport(
- credentials=credentials.AnonymousCredentials()
- )
+ with mock.patch(
+ "google.cloud.osconfig_v1.services.os_config_service.transports.OsConfigServiceTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.OsConfigServiceTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
# Every method on the transport should just blindly
# raise NotImplementedError.
@@ -2404,13 +2885,44 @@ def test_os_config_service_base_transport():
getattr(transport, method)(request=object())
+def test_os_config_service_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ auth, "load_credentials_from_file"
+ ) as load_creds, mock.patch(
+ "google.cloud.osconfig_v1.services.os_config_service.transports.OsConfigServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (credentials.AnonymousCredentials(), None)
+ transport = transports.OsConfigServiceTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_os_config_service_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(auth, "default") as adc, mock.patch(
+ "google.cloud.osconfig_v1.services.os_config_service.transports.OsConfigServiceTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (credentials.AnonymousCredentials(), None)
+ transport = transports.OsConfigServiceTransport()
+ adc.assert_called_once()
+
+
def test_os_config_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
OsConfigServiceClient()
adc.assert_called_once_with(
- scopes=("https://www.googleapis.com/auth/cloud-platform",)
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
)
@@ -2419,9 +2931,12 @@ def test_os_config_service_transport_auth_adc():
# ADC credentials.
with mock.patch.object(auth, "default") as adc:
adc.return_value = (credentials.AnonymousCredentials(), None)
- transports.OsConfigServiceGrpcTransport(host="squid.clam.whelk")
+ transports.OsConfigServiceGrpcTransport(
+ host="squid.clam.whelk", quota_project_id="octopus"
+ )
adc.assert_called_once_with(
- scopes=("https://www.googleapis.com/auth/cloud-platform",)
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
)
@@ -2432,7 +2947,7 @@ def test_os_config_service_host_no_port():
api_endpoint="osconfig.googleapis.com"
),
)
- assert client._transport._host == "osconfig.googleapis.com:443"
+ assert client.transport._host == "osconfig.googleapis.com:443"
def test_os_config_service_host_with_port():
@@ -2442,194 +2957,309 @@ def test_os_config_service_host_with_port():
api_endpoint="osconfig.googleapis.com:8000"
),
)
- assert client._transport._host == "osconfig.googleapis.com:8000"
+ assert client.transport._host == "osconfig.googleapis.com:8000"
def test_os_config_service_grpc_transport_channel():
channel = grpc.insecure_channel("http://localhost/")
- # Check that if channel is provided, mtls endpoint and client_cert_source
- # won't be used.
- callback = mock.MagicMock()
+ # Check that channel is used if provided.
transport = transports.OsConfigServiceGrpcTransport(
- host="squid.clam.whelk",
- channel=channel,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=callback,
+ host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
- assert not callback.called
+ assert transport._ssl_channel_credentials == None
def test_os_config_service_grpc_asyncio_transport_channel():
channel = aio.insecure_channel("http://localhost/")
- # Check that if channel is provided, mtls endpoint and client_cert_source
- # won't be used.
- callback = mock.MagicMock()
+ # Check that channel is used if provided.
transport = transports.OsConfigServiceGrpcAsyncIOTransport(
- host="squid.clam.whelk",
- channel=channel,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=callback,
+ host="squid.clam.whelk", channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
- assert not callback.called
-
-
-@mock.patch("grpc.ssl_channel_credentials", autospec=True)
-@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
-def test_os_config_service_grpc_transport_channel_mtls_with_client_cert_source(
- grpc_create_channel, grpc_ssl_channel_cred
-):
- # Check that if channel is None, but api_mtls_endpoint and client_cert_source
- # are provided, then a mTLS channel will be created.
- mock_cred = mock.Mock()
-
- mock_ssl_cred = mock.Mock()
- grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- transport = transports.OsConfigServiceGrpcTransport(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=client_cert_source_callback,
- )
- grpc_ssl_channel_cred.assert_called_once_with(
- certificate_chain=b"cert bytes", private_key=b"key bytes"
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- ssl_credentials=mock_ssl_cred,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
- )
- assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials is None
-@mock.patch("grpc.ssl_channel_credentials", autospec=True)
-@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
-def test_os_config_service_grpc_asyncio_transport_channel_mtls_with_client_cert_source(
- grpc_create_channel, grpc_ssl_channel_cred
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.OsConfigServiceGrpcTransport,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ ],
+)
+def test_os_config_service_transport_channel_mtls_with_client_cert_source(
+ transport_class,
):
- # Check that if channel is None, but api_mtls_endpoint and client_cert_source
- # are provided, then a mTLS channel will be created.
- mock_cred = mock.Mock()
-
- mock_ssl_cred = mock.Mock()
- grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- transport = transports.OsConfigServiceGrpcAsyncIOTransport(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=client_cert_source_callback,
- )
- grpc_ssl_channel_cred.assert_called_once_with(
- certificate_chain=b"cert bytes", private_key=b"key bytes"
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- ssl_credentials=mock_ssl_cred,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
- )
- assert transport.grpc_channel == mock_grpc_channel
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel", autospec=True
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize(
- "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
+ "transport_class",
+ [
+ transports.OsConfigServiceGrpcTransport,
+ transports.OsConfigServiceGrpcAsyncIOTransport,
+ ],
)
-@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
-def test_os_config_service_grpc_transport_channel_mtls_with_adc(
- grpc_create_channel, api_mtls_endpoint
-):
- # Check that if channel and client_cert_source are None, but api_mtls_endpoint
- # is provided, then a mTLS channel will be created with SSL ADC.
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- # Mock google.auth.transport.grpc.SslCredentials class.
+def test_os_config_service_transport_channel_mtls_with_adc(transport_class):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
- mock_cred = mock.Mock()
- transport = transports.OsConfigServiceGrpcTransport(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint=api_mtls_endpoint,
- client_cert_source=None,
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- ssl_credentials=mock_ssl_cred,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
- )
- assert transport.grpc_channel == mock_grpc_channel
+ with mock.patch.object(
+ transport_class, "create_channel", autospec=True
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ )
+ assert transport.grpc_channel == mock_grpc_channel
-@pytest.mark.parametrize(
- "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
-)
-@mock.patch("google.api_core.grpc_helpers_async.create_channel", autospec=True)
-def test_os_config_service_grpc_asyncio_transport_channel_mtls_with_adc(
- grpc_create_channel, api_mtls_endpoint
-):
- # Check that if channel and client_cert_source are None, but api_mtls_endpoint
- # is provided, then a mTLS channel will be created with SSL ADC.
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
+def test_instance_path():
+ project = "squid"
+ zone = "clam"
+ instance = "whelk"
- # Mock google.auth.transport.grpc.SslCredentials class.
- mock_ssl_cred = mock.Mock()
- with mock.patch.multiple(
- "google.auth.transport.grpc.SslCredentials",
- __init__=mock.Mock(return_value=None),
- ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
- ):
- mock_cred = mock.Mock()
- transport = transports.OsConfigServiceGrpcAsyncIOTransport(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint=api_mtls_endpoint,
- client_cert_source=None,
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- ssl_credentials=mock_ssl_cred,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
- )
- assert transport.grpc_channel == mock_grpc_channel
+ expected = "projects/{project}/zones/{zone}/instances/{instance}".format(
+ project=project, zone=zone, instance=instance,
+ )
+ actual = OsConfigServiceClient.instance_path(project, zone, instance)
+ assert expected == actual
+
+
+def test_parse_instance_path():
+ expected = {
+ "project": "octopus",
+ "zone": "oyster",
+ "instance": "nudibranch",
+ }
+ path = OsConfigServiceClient.instance_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_instance_path(path)
+ assert expected == actual
def test_patch_deployment_path():
- project = "squid"
- patch_deployment = "clam"
+ project = "cuttlefish"
+ patch_deployment = "mussel"
expected = "projects/{project}/patchDeployments/{patch_deployment}".format(
- project=project, patch_deployment=patch_deployment
+ project=project, patch_deployment=patch_deployment,
)
actual = OsConfigServiceClient.patch_deployment_path(project, patch_deployment)
assert expected == actual
def test_parse_patch_deployment_path():
- expected = {"project": "whelk", "patch_deployment": "octopus"}
+ expected = {
+ "project": "winkle",
+ "patch_deployment": "nautilus",
+ }
path = OsConfigServiceClient.patch_deployment_path(**expected)
# Check that the path construction is reversible.
actual = OsConfigServiceClient.parse_patch_deployment_path(path)
assert expected == actual
+
+
+def test_patch_job_path():
+ project = "scallop"
+ patch_job = "abalone"
+
+ expected = "projects/{project}/patchJobs/{patch_job}".format(
+ project=project, patch_job=patch_job,
+ )
+ actual = OsConfigServiceClient.patch_job_path(project, patch_job)
+ assert expected == actual
+
+
+def test_parse_patch_job_path():
+ expected = {
+ "project": "squid",
+ "patch_job": "clam",
+ }
+ path = OsConfigServiceClient.patch_job_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_patch_job_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "whelk"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = OsConfigServiceClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "octopus",
+ }
+ path = OsConfigServiceClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "oyster"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = OsConfigServiceClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nudibranch",
+ }
+ path = OsConfigServiceClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "cuttlefish"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = OsConfigServiceClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "mussel",
+ }
+ path = OsConfigServiceClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "winkle"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = OsConfigServiceClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "nautilus",
+ }
+ path = OsConfigServiceClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "scallop"
+ location = "abalone"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = OsConfigServiceClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "squid",
+ "location": "clam",
+ }
+ path = OsConfigServiceClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = OsConfigServiceClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.OsConfigServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = OsConfigServiceClient(
+ credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.OsConfigServiceTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = OsConfigServiceClient.get_transport_class()
+ transport = transport_class(
+ credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)