diff --git a/.flake8 b/.flake8
index ac68f2a..29227d4 100644
--- a/.flake8
+++ b/.flake8
@@ -16,14 +16,17 @@
# Generated by synthtool. DO NOT EDIT!
[flake8]
-ignore = E203, E266, E501, W503, F401, F841, E712
+ignore = E203, E266, E501, W503
exclude =
# Exclude generated code.
**/proto/**
**/gapic/**
+ **/services/**
+ **/types/**
*_pb2.py
# Standard linting exemptions.
+ **/.nox/**
__pycache__,
.git,
*.pyc,
diff --git a/.github/snippet-bot.yml b/.github/snippet-bot.yml
new file mode 100644
index 0000000..e69de29
diff --git a/.gitignore b/.gitignore
index df79b14..b9daa52 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@
dist
build
eggs
+.eggs
parts
bin
var
@@ -44,15 +45,17 @@ pip-log.txt
# Built documentation
docs/_build
-htmlcov
+bigquery/docs/generated
+docs.metadata
# Virtual environment
env/
coverage.xml
+sponge_log.xml
# System test environment variables.
system_tests/local_test_setup
# Make sure a generated file isn't accidentally committed.
pylintrc
-pylintrc.test
\ No newline at end of file
+pylintrc.test
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index 6c21d00..b96af36 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation
python3.6 -m pip install --upgrade --quiet nox
python3.6 -m nox --version
-python3.6 -m nox
+# If NOX_SESSION is set, it only runs the specified session,
+# otherwise run all the sessions.
+if [[ -n "${NOX_SESSION:-}" ]]; then
+ python3.6 -m nox -s "${NOX_SESSION:-}"
+else
+ python3.6 -m nox
+fi
diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile
new file mode 100644
index 0000000..412b0b5
--- /dev/null
+++ b/.kokoro/docker/docs/Dockerfile
@@ -0,0 +1,98 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ubuntu:20.04
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Ensure local Python is preferred over distribution Python.
+ENV PATH /usr/local/bin:$PATH
+
+# Install dependencies.
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends \
+ apt-transport-https \
+ build-essential \
+ ca-certificates \
+ curl \
+ dirmngr \
+ git \
+ gpg-agent \
+ graphviz \
+ libbz2-dev \
+ libdb5.3-dev \
+ libexpat1-dev \
+ libffi-dev \
+ liblzma-dev \
+ libreadline-dev \
+ libsnappy-dev \
+ libssl-dev \
+ libsqlite3-dev \
+ portaudio19-dev \
+ redis-server \
+ software-properties-common \
+ ssh \
+ sudo \
+ tcl \
+ tcl-dev \
+ tk \
+ tk-dev \
+ uuid-dev \
+ wget \
+ zlib1g-dev \
+ && add-apt-repository universe \
+ && apt-get update \
+ && apt-get -y install jq \
+ && apt-get clean autoclean \
+ && apt-get autoremove -y \
+ && rm -rf /var/lib/apt/lists/* \
+ && rm -f /var/cache/apt/archives/*.deb
+
+
+COPY fetch_gpg_keys.sh /tmp
+# Install the desired versions of Python.
+RUN set -ex \
+ && export GNUPGHOME="$(mktemp -d)" \
+ && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \
+ && /tmp/fetch_gpg_keys.sh \
+ && for PYTHON_VERSION in 3.7.8 3.8.5; do \
+ wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \
+ && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \
+ && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \
+ && rm -r python-${PYTHON_VERSION}.tar.xz.asc \
+ && mkdir -p /usr/src/python-${PYTHON_VERSION} \
+ && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \
+ && rm python-${PYTHON_VERSION}.tar.xz \
+ && cd /usr/src/python-${PYTHON_VERSION} \
+ && ./configure \
+ --enable-shared \
+ # This works only on Python 2.7 and throws a warning on every other
+ # version, but seems otherwise harmless.
+ --enable-unicode=ucs4 \
+ --with-system-ffi \
+ --without-ensurepip \
+ && make -j$(nproc) \
+ && make install \
+ && ldconfig \
+ ; done \
+ && rm -rf "${GNUPGHOME}" \
+ && rm -rf /usr/src/python* \
+ && rm -rf ~/.cache/
+
+RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \
+ && python3.7 /tmp/get-pip.py \
+ && python3.8 /tmp/get-pip.py \
+ && rm /tmp/get-pip.py
+
+CMD ["python3.7"]
diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh
new file mode 100755
index 0000000..d653dd8
--- /dev/null
+++ b/.kokoro/docker/docs/fetch_gpg_keys.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# A script to fetch gpg keys with retry.
+# Avoid jinja parsing the file.
+#
+
+function retry {
+ if [[ "${#}" -le 1 ]]; then
+ echo "Usage: ${0} retry_count commands.."
+ exit 1
+ fi
+ local retries=${1}
+ local command="${@:2}"
+ until [[ "${retries}" -le 0 ]]; do
+ $command && return 0
+ if [[ $? -ne 0 ]]; then
+ echo "command failed, retrying"
+ ((retries--))
+ fi
+ done
+ return 1
+}
+
+# 3.6.9, 3.7.5 (Ned Deily)
+retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \
+ 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D
+
+# 3.8.0 (Łukasz Langa)
+retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \
+ E3FF2839C048B25C084DEBE9B26995E310250568
+
+#
diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg
index 2054a56..6197ce8 100644
--- a/.kokoro/docs/common.cfg
+++ b/.kokoro/docs/common.cfg
@@ -11,12 +11,12 @@ action {
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-memcache/.kokoro/trampoline.sh"
+build_file: "python-memcache/.kokoro/trampoline_v2.sh"
# Configure the docker image for kokoro-trampoline.
env_vars: {
key: "TRAMPOLINE_IMAGE"
- value: "gcr.io/cloud-devrel-kokoro-resources/python-multi"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs"
}
env_vars: {
key: "TRAMPOLINE_BUILD_FILE"
@@ -28,6 +28,23 @@ env_vars: {
value: "docs-staging"
}
+env_vars: {
+ key: "V2_STAGING_BUCKET"
+ value: "docs-staging-v2"
+}
+
+# It will upload the docker image after successful builds.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE_UPLOAD"
+ value: "true"
+}
+
+# It will always build the docker image.
+env_vars: {
+ key: "TRAMPOLINE_DOCKERFILE"
+ value: ".kokoro/docker/docs/Dockerfile"
+}
+
# Fetch the token needed for reporting release status to GitHub
before_action {
fetch_keystore {
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
new file mode 100644
index 0000000..1118107
--- /dev/null
+++ b/.kokoro/docs/docs-presubmit.cfg
@@ -0,0 +1,17 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "STAGING_BUCKET"
+ value: "gcloud-python-test"
+}
+
+env_vars: {
+ key: "V2_STAGING_BUCKET"
+ value: "gcloud-python-test"
+}
+
+# We only upload the image in the main `docs` build.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE_UPLOAD"
+ value: "false"
+}
diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh
new file mode 100755
index 0000000..f525142
--- /dev/null
+++ b/.kokoro/populate-secrets.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2020 Google LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -eo pipefail
+
+function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;}
+function msg { println "$*" >&2 ;}
+function println { printf '%s\n' "$(now) $*" ;}
+
+
+# Populates requested secrets set in SECRET_MANAGER_KEYS from service account:
+# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com
+SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager"
+msg "Creating folder on disk for secrets: ${SECRET_LOCATION}"
+mkdir -p ${SECRET_LOCATION}
+for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g")
+do
+ msg "Retrieving secret ${key}"
+ docker run --entrypoint=gcloud \
+ --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \
+ gcr.io/google.com/cloudsdktool/cloud-sdk \
+ secrets versions access latest \
+ --project cloud-devrel-kokoro-resources \
+ --secret ${key} > \
+ "${SECRET_LOCATION}/${key}"
+ if [[ $? == 0 ]]; then
+ msg "Secret written to ${SECRET_LOCATION}/${key}"
+ else
+ msg "Error retrieving secret ${key}"
+ fi
+done
diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh
index 2bb90a9..8acb14e 100755
--- a/.kokoro/publish-docs.sh
+++ b/.kokoro/publish-docs.sh
@@ -18,26 +18,16 @@ set -eo pipefail
# Disable buffering, so that the logs stream through.
export PYTHONUNBUFFERED=1
-cd github/python-memcache
-
-# Remove old nox
-python3.6 -m pip uninstall --yes --quiet nox-automation
+export PATH="${HOME}/.local/bin:${PATH}"
# Install nox
-python3.6 -m pip install --upgrade --quiet nox
-python3.6 -m nox --version
+python3 -m pip install --user --upgrade --quiet nox
+python3 -m nox --version
# build docs
nox -s docs
-python3 -m pip install gcp-docuploader
-
-# install a json parser
-sudo apt-get update
-sudo apt-get -y install software-properties-common
-sudo add-apt-repository universe
-sudo apt-get update
-sudo apt-get -y install jq
+python3 -m pip install --user gcp-docuploader
# create metadata
python3 -m docuploader create-metadata \
@@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \
cat docs.metadata
# upload docs
-python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging
+python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
+
+
+# docfx yaml files
+nox -s docfx
+
+# create metadata.
+python3 -m docuploader create-metadata \
+ --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
+ --version=$(python3 setup.py --version) \
+ --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
+ --distribution-name=$(python3 setup.py --name) \
+ --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
+ --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
+ --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
+
+cat docs.metadata
+
+# upload docs
+python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
index 858e856..b4b40c4 100644
--- a/.kokoro/release/common.cfg
+++ b/.kokoro/release/common.cfg
@@ -23,42 +23,18 @@ env_vars: {
value: "github/python-memcache/.kokoro/release.sh"
}
-# Fetch the token needed for reporting release status to GitHub
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "yoshi-automation-github-key"
- }
- }
-}
-
-# Fetch PyPI password
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "google_cloud_pypi_password"
- }
- }
-}
-
-# Fetch magictoken to use with Magic Github Proxy
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "releasetool-magictoken"
- }
- }
+# Fetch PyPI password
+before_action {
+ fetch_keystore {
+ keystore_resource {
+ keystore_config_id: 73713
+ keyname: "google_cloud_pypi_password"
+ }
+ }
}
-# Fetch api key to use with Magic Github Proxy
-before_action {
- fetch_keystore {
- keystore_resource {
- keystore_config_id: 73713
- keyname: "magic-github-proxy-api-key"
- }
- }
-}
+# Tokens needed to report release status back to GitHub
+env_vars: {
+ key: "SECRET_MANAGER_KEYS"
+ value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg
new file mode 100644
index 0000000..31f0ad7
--- /dev/null
+++ b/.kokoro/samples/lint/common.cfg
@@ -0,0 +1,34 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "lint"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-memcache/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-memcache/.kokoro/trampoline.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/lint/continuous.cfg b/.kokoro/samples/lint/continuous.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/lint/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/lint/periodic.cfg b/.kokoro/samples/lint/periodic.cfg
new file mode 100644
index 0000000..50fec96
--- /dev/null
+++ b/.kokoro/samples/lint/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/lint/presubmit.cfg b/.kokoro/samples/lint/presubmit.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/lint/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg
new file mode 100644
index 0000000..bfd6190
--- /dev/null
+++ b/.kokoro/samples/python3.6/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.6"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py36"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-memcache/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-memcache/.kokoro/trampoline.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/continuous.cfg b/.kokoro/samples/python3.6/continuous.cfg
new file mode 100644
index 0000000..7218af1
--- /dev/null
+++ b/.kokoro/samples/python3.6/continuous.cfg
@@ -0,0 +1,7 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg
new file mode 100644
index 0000000..50fec96
--- /dev/null
+++ b/.kokoro/samples/python3.6/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/presubmit.cfg b/.kokoro/samples/python3.6/presubmit.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/python3.6/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg
new file mode 100644
index 0000000..a7be3aa
--- /dev/null
+++ b/.kokoro/samples/python3.7/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.7"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py37"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-memcache/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-memcache/.kokoro/trampoline.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/continuous.cfg b/.kokoro/samples/python3.7/continuous.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/python3.7/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg
new file mode 100644
index 0000000..50fec96
--- /dev/null
+++ b/.kokoro/samples/python3.7/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/presubmit.cfg b/.kokoro/samples/python3.7/presubmit.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/python3.7/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg
new file mode 100644
index 0000000..ec8abd3
--- /dev/null
+++ b/.kokoro/samples/python3.8/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.8"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-py38"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-memcache/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-memcache/.kokoro/trampoline.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/continuous.cfg b/.kokoro/samples/python3.8/continuous.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/python3.8/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg
new file mode 100644
index 0000000..50fec96
--- /dev/null
+++ b/.kokoro/samples/python3.8/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/presubmit.cfg b/.kokoro/samples/python3.8/presubmit.cfg
new file mode 100644
index 0000000..a1c8d97
--- /dev/null
+++ b/.kokoro/samples/python3.8/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
new file mode 100755
index 0000000..f2285be
--- /dev/null
+++ b/.kokoro/test-samples.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# `-e` enables the script to automatically fail when a command fails
+# `-o pipefail` sets the exit code to the rightmost command to exit with a non-zero
+set -eo pipefail
+# Enables `**` to include files nested inside sub-folders
+shopt -s globstar
+
+cd github/python-memcache
+
+# Run periodic samples tests at latest release
+if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+ LATEST_RELEASE=$(git describe --abbrev=0 --tags)
+ git checkout $LATEST_RELEASE
+fi
+
+# Exit early if samples directory doesn't exist
+if [ ! -d "./samples" ]; then
+ echo "No tests run. `./samples` not found"
+ exit 0
+fi
+
+# Disable buffering, so that the logs stream through.
+export PYTHONUNBUFFERED=1
+
+# Debug: show build environment
+env | grep KOKORO
+
+# Install nox
+python3.6 -m pip install --upgrade --quiet nox
+
+# Use secrets accessor service account to get secrets
+if [[ -f "${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" ]]; then
+ gcloud auth activate-service-account \
+ --key-file="${KOKORO_GFILE_DIR}/secrets_viewer_service_account.json" \
+ --project="cloud-devrel-kokoro-resources"
+fi
+
+# This script will create 3 files:
+# - testing/test-env.sh
+# - testing/service-account.json
+# - testing/client-secrets.json
+./scripts/decrypt-secrets.sh
+
+source ./testing/test-env.sh
+export GOOGLE_APPLICATION_CREDENTIALS=$(pwd)/testing/service-account.json
+
+# For cloud-run session, we activate the service account for gcloud sdk.
+gcloud auth activate-service-account \
+ --key-file "${GOOGLE_APPLICATION_CREDENTIALS}"
+
+export GOOGLE_CLIENT_SECRETS=$(pwd)/testing/client-secrets.json
+
+echo -e "\n******************** TESTING PROJECTS ********************"
+
+# Switch to 'fail at end' to allow all tests to complete before exiting.
+set +e
+# Use RTN to return a non-zero value if the test fails.
+RTN=0
+ROOT=$(pwd)
+# Find all requirements.txt in the samples directory (may break on whitespace).
+for file in samples/**/requirements.txt; do
+ cd "$ROOT"
+ # Navigate to the project folder.
+ file=$(dirname "$file")
+ cd "$file"
+
+ echo "------------------------------------------------------------"
+ echo "- testing $file"
+ echo "------------------------------------------------------------"
+
+ # Use nox to execute the tests for the project.
+ python3.6 -m nox -s "$RUN_TESTS_SESSION"
+ EXIT=$?
+
+ # If this is a periodic build, send the test log to the Build Cop Bot.
+ # See https://github.com/googleapis/repo-automation-bots/tree/master/packages/buildcop.
+ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
+ chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop
+ $KOKORO_GFILE_DIR/linux_amd64/buildcop
+ fi
+
+ if [[ $EXIT -ne 0 ]]; then
+ RTN=1
+ echo -e "\n Testing failed: Nox returned a non-zero exit code. \n"
+ else
+ echo -e "\n Testing completed.\n"
+ fi
+
+done
+cd "$ROOT"
+
+# Workaround for Kokoro permissions issue: delete secrets
+rm testing/{test-env.sh,client-secrets.json,service-account.json}
+
+exit "$RTN"
diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh
index e8c4251..f39236e 100755
--- a/.kokoro/trampoline.sh
+++ b/.kokoro/trampoline.sh
@@ -15,9 +15,14 @@
set -eo pipefail
-python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$?
+# Always run the cleanup script, regardless of the success of bouncing into
+# the container.
+function cleanup() {
+ chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
+ echo "cleanup";
+}
+trap cleanup EXIT
-chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh
-${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true
-
-exit ${ret_code}
+$(dirname $0)/populate-secrets.sh # Secret Manager secrets.
+python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py"
\ No newline at end of file
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh
new file mode 100755
index 0000000..719bcd5
--- /dev/null
+++ b/.kokoro/trampoline_v2.sh
@@ -0,0 +1,487 @@
+#!/usr/bin/env bash
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# trampoline_v2.sh
+#
+# This script does 3 things.
+#
+# 1. Prepare the Docker image for the test
+# 2. Run the Docker with appropriate flags to run the test
+# 3. Upload the newly built Docker image
+#
+# in a way that is somewhat compatible with trampoline_v1.
+#
+# To run this script, first download a few files from gcs to /dev/shm.
+# (/dev/shm is passed into the container as KOKORO_GFILE_DIR).
+#
+# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm
+# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm
+#
+# Then run the script.
+# .kokoro/trampoline_v2.sh
+#
+# These environment variables are required:
+# TRAMPOLINE_IMAGE: The docker image to use.
+# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile.
+#
+# You can optionally change these environment variables:
+# TRAMPOLINE_IMAGE_UPLOAD:
+# (true|false): Whether to upload the Docker image after the
+# successful builds.
+# TRAMPOLINE_BUILD_FILE: The script to run in the docker container.
+# TRAMPOLINE_WORKSPACE: The workspace path in the docker container.
+# Defaults to /workspace.
+# Potentially there are some repo specific envvars in .trampolinerc in
+# the project root.
+
+
+set -euo pipefail
+
+TRAMPOLINE_VERSION="2.0.5"
+
+if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then
+ readonly IO_COLOR_RED="$(tput setaf 1)"
+ readonly IO_COLOR_GREEN="$(tput setaf 2)"
+ readonly IO_COLOR_YELLOW="$(tput setaf 3)"
+ readonly IO_COLOR_RESET="$(tput sgr0)"
+else
+ readonly IO_COLOR_RED=""
+ readonly IO_COLOR_GREEN=""
+ readonly IO_COLOR_YELLOW=""
+ readonly IO_COLOR_RESET=""
+fi
+
+function function_exists {
+ [ $(LC_ALL=C type -t $1)"" == "function" ]
+}
+
+# Logs a message using the given color. The first argument must be one
+# of the IO_COLOR_* variables defined above, such as
+# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the
+# given color. The log message will also have an RFC-3339 timestamp
+# prepended (in UTC). You can disable the color output by setting
+# TERM=vt100.
+function log_impl() {
+ local color="$1"
+ shift
+ local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")"
+ echo "================================================================"
+ echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}"
+ echo "================================================================"
+}
+
+# Logs the given message with normal coloring and a timestamp.
+function log() {
+ log_impl "${IO_COLOR_RESET}" "$@"
+}
+
+# Logs the given message in green with a timestamp.
+function log_green() {
+ log_impl "${IO_COLOR_GREEN}" "$@"
+}
+
+# Logs the given message in yellow with a timestamp.
+function log_yellow() {
+ log_impl "${IO_COLOR_YELLOW}" "$@"
+}
+
+# Logs the given message in red with a timestamp.
+function log_red() {
+ log_impl "${IO_COLOR_RED}" "$@"
+}
+
+readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX)
+readonly tmphome="${tmpdir}/h"
+mkdir -p "${tmphome}"
+
+function cleanup() {
+ rm -rf "${tmpdir}"
+}
+trap cleanup EXIT
+
+RUNNING_IN_CI="${RUNNING_IN_CI:-false}"
+
+# The workspace in the container, defaults to /workspace.
+TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}"
+
+pass_down_envvars=(
+ # TRAMPOLINE_V2 variables.
+ # Tells scripts whether they are running as part of CI or not.
+ "RUNNING_IN_CI"
+ # Indicates which CI system we're in.
+ "TRAMPOLINE_CI"
+ # Indicates the version of the script.
+ "TRAMPOLINE_VERSION"
+)
+
+log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}"
+
+# Detect which CI systems we're in. If we're in any of the CI systems
+# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be
+# the name of the CI system. Both envvars will be passed down to the
+# container for telling which CI system we're in.
+if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then
+ # descriptive env var for indicating it's on CI.
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="kokoro"
+ if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then
+ if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then
+ log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting."
+ exit 1
+ fi
+ # This service account will be activated later.
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json"
+ else
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ gcloud auth list
+ fi
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+ fi
+ pass_down_envvars+=(
+ # KOKORO dynamic variables.
+ "KOKORO_BUILD_NUMBER"
+ "KOKORO_BUILD_ID"
+ "KOKORO_JOB_NAME"
+ "KOKORO_GIT_COMMIT"
+ "KOKORO_GITHUB_COMMIT"
+ "KOKORO_GITHUB_PULL_REQUEST_NUMBER"
+ "KOKORO_GITHUB_PULL_REQUEST_COMMIT"
+ # For Build Cop Bot
+ "KOKORO_GITHUB_COMMIT_URL"
+ "KOKORO_GITHUB_PULL_REQUEST_URL"
+ )
+elif [[ "${TRAVIS:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="travis"
+ pass_down_envvars+=(
+ "TRAVIS_BRANCH"
+ "TRAVIS_BUILD_ID"
+ "TRAVIS_BUILD_NUMBER"
+ "TRAVIS_BUILD_WEB_URL"
+ "TRAVIS_COMMIT"
+ "TRAVIS_COMMIT_MESSAGE"
+ "TRAVIS_COMMIT_RANGE"
+ "TRAVIS_JOB_NAME"
+ "TRAVIS_JOB_NUMBER"
+ "TRAVIS_JOB_WEB_URL"
+ "TRAVIS_PULL_REQUEST"
+ "TRAVIS_PULL_REQUEST_BRANCH"
+ "TRAVIS_PULL_REQUEST_SHA"
+ "TRAVIS_PULL_REQUEST_SLUG"
+ "TRAVIS_REPO_SLUG"
+ "TRAVIS_SECURE_ENV_VARS"
+ "TRAVIS_TAG"
+ )
+elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="github-workflow"
+ pass_down_envvars+=(
+ "GITHUB_WORKFLOW"
+ "GITHUB_RUN_ID"
+ "GITHUB_RUN_NUMBER"
+ "GITHUB_ACTION"
+ "GITHUB_ACTIONS"
+ "GITHUB_ACTOR"
+ "GITHUB_REPOSITORY"
+ "GITHUB_EVENT_NAME"
+ "GITHUB_EVENT_PATH"
+ "GITHUB_SHA"
+ "GITHUB_REF"
+ "GITHUB_HEAD_REF"
+ "GITHUB_BASE_REF"
+ )
+elif [[ "${CIRCLECI:-}" == "true" ]]; then
+ RUNNING_IN_CI="true"
+ TRAMPOLINE_CI="circleci"
+ pass_down_envvars+=(
+ "CIRCLE_BRANCH"
+ "CIRCLE_BUILD_NUM"
+ "CIRCLE_BUILD_URL"
+ "CIRCLE_COMPARE_URL"
+ "CIRCLE_JOB"
+ "CIRCLE_NODE_INDEX"
+ "CIRCLE_NODE_TOTAL"
+ "CIRCLE_PREVIOUS_BUILD_NUM"
+ "CIRCLE_PROJECT_REPONAME"
+ "CIRCLE_PROJECT_USERNAME"
+ "CIRCLE_REPOSITORY_URL"
+ "CIRCLE_SHA1"
+ "CIRCLE_STAGE"
+ "CIRCLE_USERNAME"
+ "CIRCLE_WORKFLOW_ID"
+ "CIRCLE_WORKFLOW_JOB_ID"
+ "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS"
+ "CIRCLE_WORKFLOW_WORKSPACE_ID"
+ )
+fi
+
+# Configure the service account for pulling the docker image.
+function repo_root() {
+ local dir="$1"
+ while [[ ! -d "${dir}/.git" ]]; do
+ dir="$(dirname "$dir")"
+ done
+ echo "${dir}"
+}
+
+# Detect the project root. In CI builds, we assume the script is in
+# the git tree and traverse from there, otherwise, traverse from `pwd`
+# to find `.git` directory.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ PROGRAM_PATH="$(realpath "$0")"
+ PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")"
+ PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")"
+else
+ PROJECT_ROOT="$(repo_root $(pwd))"
+fi
+
+log_yellow "Changing to the project root: ${PROJECT_ROOT}."
+cd "${PROJECT_ROOT}"
+
+# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need
+# to use this environment variable in `PROJECT_ROOT`.
+if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then
+
+ mkdir -p "${tmpdir}/gcloud"
+ gcloud_config_dir="${tmpdir}/gcloud"
+
+ log_yellow "Using isolated gcloud config: ${gcloud_config_dir}."
+ export CLOUDSDK_CONFIG="${gcloud_config_dir}"
+
+ log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication."
+ gcloud auth activate-service-account \
+ --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}"
+ log_yellow "Configuring Container Registry access"
+ gcloud auth configure-docker --quiet
+fi
+
+required_envvars=(
+ # The basic trampoline configurations.
+ "TRAMPOLINE_IMAGE"
+ "TRAMPOLINE_BUILD_FILE"
+)
+
+if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then
+ source "${PROJECT_ROOT}/.trampolinerc"
+fi
+
+log_yellow "Checking environment variables."
+for e in "${required_envvars[@]}"
+do
+ if [[ -z "${!e:-}" ]]; then
+ log "Missing ${e} env var. Aborting."
+ exit 1
+ fi
+done
+
+# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1
+# script: e.g. "github/repo-name/.kokoro/run_tests.sh"
+TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}"
+log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}"
+
+# ignore error on docker operations and test execution
+set +e
+
+log_yellow "Preparing Docker image."
+# We only download the docker image in CI builds.
+if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ # Download the docker image specified by `TRAMPOLINE_IMAGE`
+
+ # We may want to add --max-concurrent-downloads flag.
+
+ log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ if docker pull "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="true"
+ else
+ log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}."
+ has_image="false"
+ fi
+else
+ # For local run, check if we have the image.
+ if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then
+ has_image="true"
+ else
+ has_image="false"
+ fi
+fi
+
+
+# The default user for a Docker container has uid 0 (root). To avoid
+# creating root-owned files in the build directory we tell docker to
+# use the current user ID.
+user_uid="$(id -u)"
+user_gid="$(id -g)"
+user_name="$(id -un)"
+
+# To allow docker in docker, we add the user to the docker group in
+# the host os.
+docker_gid=$(cut -d: -f3 < <(getent group docker))
+
+update_cache="false"
+if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then
+ # Build the Docker image from the source.
+ context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}")
+ docker_build_flags=(
+ "-f" "${TRAMPOLINE_DOCKERFILE}"
+ "-t" "${TRAMPOLINE_IMAGE}"
+ "--build-arg" "UID=${user_uid}"
+ "--build-arg" "USERNAME=${user_name}"
+ )
+ if [[ "${has_image}" == "true" ]]; then
+ docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}")
+ fi
+
+ log_yellow "Start building the docker image."
+ if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then
+ echo "docker build" "${docker_build_flags[@]}" "${context_dir}"
+ fi
+
+ # ON CI systems, we want to suppress docker build logs, only
+ # output the logs when it fails.
+ if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then
+ if docker build "${docker_build_flags[@]}" "${context_dir}" \
+ > "${tmpdir}/docker_build.log" 2>&1; then
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ cat "${tmpdir}/docker_build.log"
+ fi
+
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ log_yellow "Dumping the build logs:"
+ cat "${tmpdir}/docker_build.log"
+ exit 1
+ fi
+ else
+ if docker build "${docker_build_flags[@]}" "${context_dir}"; then
+ log_green "Finished building the docker image."
+ update_cache="true"
+ else
+ log_red "Failed to build the Docker image, aborting."
+ exit 1
+ fi
+ fi
+else
+ if [[ "${has_image}" != "true" ]]; then
+ log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting."
+ exit 1
+ fi
+fi
+
+# We use an array for the flags so they are easier to document.
+docker_flags=(
+  # Remove the container after it exits.
+ "--rm"
+
+ # Use the host network.
+ "--network=host"
+
+  # Run in privileged mode. We are not using docker for sandboxing or
+  # isolation, just for packaging our dev tools.
+ "--privileged"
+
+ # Run the docker script with the user id. Because the docker image gets to
+ # write in ${PWD} you typically want this to be your user id.
+ # To allow docker in docker, we need to use docker gid on the host.
+ "--user" "${user_uid}:${docker_gid}"
+
+ # Pass down the USER.
+ "--env" "USER=${user_name}"
+
+ # Mount the project directory inside the Docker container.
+ "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}"
+ "--workdir" "${TRAMPOLINE_WORKSPACE}"
+ "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}"
+
+ # Mount the temporary home directory.
+ "--volume" "${tmphome}:/h"
+ "--env" "HOME=/h"
+
+ # Allow docker in docker.
+ "--volume" "/var/run/docker.sock:/var/run/docker.sock"
+
+ # Mount the /tmp so that docker in docker can mount the files
+ # there correctly.
+ "--volume" "/tmp:/tmp"
+ # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR
+ # TODO(tmatsuo): This part is not portable.
+ "--env" "TRAMPOLINE_SECRET_DIR=/secrets"
+ "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile"
+ "--env" "KOKORO_GFILE_DIR=/secrets/gfile"
+ "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore"
+ "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore"
+)
+
+# Add an option for nicer output if the build gets a tty.
+if [[ -t 0 ]]; then
+ docker_flags+=("-it")
+fi
+
+# Passing down env vars
+for e in "${pass_down_envvars[@]}"
+do
+ if [[ -n "${!e:-}" ]]; then
+ docker_flags+=("--env" "${e}=${!e}")
+ fi
+done
+
+# If arguments are given, all arguments will become the commands run
+# in the container, otherwise run TRAMPOLINE_BUILD_FILE.
+if [[ $# -ge 1 ]]; then
+ log_yellow "Running the given commands '" "${@:1}" "' in the container."
+ readonly commands=("${@:1}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}"
+else
+ log_yellow "Running the tests in a Docker container."
+ docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}")
+ if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then
+ echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+ fi
+ docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}"
+fi
+
+
+test_retval=$?
+
+if [[ ${test_retval} -eq 0 ]]; then
+ log_green "Build finished with ${test_retval}"
+else
+ log_red "Build finished with ${test_retval}"
+fi
+
+# Only upload it when the test passes.
+if [[ "${update_cache}" == "true" ]] && \
+ [[ $test_retval == 0 ]] && \
+ [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then
+ log_yellow "Uploading the Docker image."
+ if docker push "${TRAMPOLINE_IMAGE}"; then
+ log_green "Finished uploading the Docker image."
+ else
+ log_red "Failed uploading the Docker image."
+ fi
+ # Call trampoline_after_upload_hook if it's defined.
+ if function_exists trampoline_after_upload_hook; then
+ trampoline_after_upload_hook
+ fi
+
+fi
+
+exit "${test_retval}"
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..a9024b1
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,17 @@
+# See https://pre-commit.com for more information
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v3.4.0
+ hooks:
+ - id: trailing-whitespace
+ - id: end-of-file-fixer
+ - id: check-yaml
+- repo: https://github.com/psf/black
+ rev: 19.10b0
+ hooks:
+ - id: black
+- repo: https://gitlab.com/pycqa/flake8
+ rev: 3.8.4
+ hooks:
+ - id: flake8
diff --git a/.trampolinerc b/.trampolinerc
new file mode 100644
index 0000000..995ee29
--- /dev/null
+++ b/.trampolinerc
@@ -0,0 +1,51 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Template for .trampolinerc
+
+# Add required env vars here.
+required_envvars+=(
+ "STAGING_BUCKET"
+ "V2_STAGING_BUCKET"
+)
+
+# Add env vars which are passed down into the container here.
+pass_down_envvars+=(
+ "STAGING_BUCKET"
+ "V2_STAGING_BUCKET"
+)
+
+# Prevent unintentional override on the default image.
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \
+ [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image."
+ exit 1
+fi
+
+# Define the default value if it makes sense.
+if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then
+ TRAMPOLINE_IMAGE_UPLOAD=""
+fi
+
+if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then
+ TRAMPOLINE_IMAGE=""
+fi
+
+if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then
+ TRAMPOLINE_DOCKERFILE=""
+fi
+
+if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then
+ TRAMPOLINE_BUILD_FILE=""
+fi
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index b3d1f60..039f436 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -1,44 +1,95 @@
-# Contributor Code of Conduct
+# Code of Conduct
-As contributors and maintainers of this project,
-and in the interest of fostering an open and welcoming community,
-we pledge to respect all people who contribute through reporting issues,
-posting feature requests, updating documentation,
-submitting pull requests or patches, and other activities.
+## Our Pledge
-We are committed to making participation in this project
-a harassment-free experience for everyone,
-regardless of level of experience, gender, gender identity and expression,
-sexual orientation, disability, personal appearance,
-body size, race, ethnicity, age, religion, or nationality.
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age, body
+size, disability, ethnicity, gender identity and expression, level of
+experience, education, socio-economic status, nationality, personal appearance,
+race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
-* The use of sexualized language or imagery
-* Personal attacks
-* Trolling or insulting/derogatory comments
-* Public or private harassment
-* Publishing other's private information,
-such as physical or electronic
-addresses, without explicit permission
-* Other unethical or unprofessional conduct.
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject
-comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct.
-By adopting this Code of Conduct,
-project maintainers commit themselves to fairly and consistently
-applying these principles to every aspect of managing this project.
-Project maintainers who do not follow or enforce the Code of Conduct
-may be permanently removed from the project team.
-
-This code of conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community.
-
-Instances of abusive, harassing, or otherwise unacceptable behavior
-may be reported by opening an issue
-or contacting one or more of the project maintainers.
-
-This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
-available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, or to ban temporarily or permanently any
+contributor for other behaviors that they deem inappropriate, threatening,
+offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an appointed
+representative at an online or offline event. Representation of a project may be
+further defined and clarified by project maintainers.
+
+This Code of Conduct also applies outside the project spaces when the Project
+Steward has a reasonable belief that an individual's behavior may have a
+negative impact on the project or its community.
+
+## Conflict Resolution
+
+We do not believe that all conflict is bad; healthy debate and disagreement
+often yield positive results. However, it is never okay to be disrespectful or
+to engage in behavior that violates the project’s code of conduct.
+
+If you see someone violating the code of conduct, you are encouraged to address
+the behavior directly with those involved. Many issues can be resolved quickly
+and easily, and this gives people more control over the outcome of their
+dispute. If you are unable to resolve the matter for any reason, or if the
+behavior is threatening or harassing, report it. We are dedicated to providing
+an environment where participants feel welcome and safe.
+
+
+Reports should be directed to *googleapis-stewards@google.com*, the
+Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to
+receive and address reported violations of the code of conduct. They will then
+work with a committee consisting of representatives from the Open Source
+Programs Office and the Google Open Source Strategy team. If for any reason you
+are uncomfortable reaching out to the Project Steward, please email
+opensource@google.com.
+
+We will investigate every complaint, but you may not receive a direct response.
+We will use our discretion in determining when and how to follow up on reported
+incidents, which may range from not taking action to permanent expulsion from
+the project and project-sponsored spaces. We will notify the accused of the
+report and provide them an opportunity to discuss it before any action is taken.
+The identity of the reporter will be omitted from the details of the report
+supplied to the accused. In potentially harmful situations, such as ongoing
+harassment or threats to anyone's safety, we may take action without notice.
+
+## Attribution
+
+This Code of Conduct is adapted from the Contributor Covenant, version 1.4,
+available at
+https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 8577546..92e2f10 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -21,8 +21,8 @@ In order to add a feature:
- The feature must be documented in both the API and narrative
documentation.
-- The feature must work fully on the following CPython versions: 2.7,
- 3.5, 3.6, 3.7 and 3.8 on both UNIX and Windows.
+- The feature must work fully on the following CPython versions:
+ 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -80,25 +80,6 @@ We use `nox `__ to instrument our tests.
.. nox: https://pypi.org/project/nox/
-Note on Editable Installs / Develop Mode
-========================================
-
-- As mentioned previously, using ``setuptools`` in `develop mode`_
- or a ``pip`` `editable install`_ is not possible with this
- library. This is because this library uses `namespace packages`_.
- For context see `Issue #2316`_ and the relevant `PyPA issue`_.
-
- Since ``editable`` / ``develop`` mode can't be used, packages
- need to be installed directly. Hence your changes to the source
- tree don't get incorporated into the **already installed**
- package.
-
-.. _namespace packages: https://www.python.org/dev/peps/pep-0420/
-.. _Issue #2316: https://github.com/GoogleCloudPlatform/google-cloud-python/issues/2316
-.. _PyPA issue: https://github.com/pypa/packaging-problems/issues/12
-.. _develop mode: https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode
-.. _editable install: https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
-
*****************************************
I'm getting weird errors... Can you help?
*****************************************
@@ -130,6 +111,16 @@ Coding Style
should point to the official ``googleapis`` checkout and the
the branch should be the main branch on that remote (``master``).
+- This repository contains configuration for the
+ `pre-commit `__ tool, which automates checking
+ our linters during a commit. If you have it installed on your ``$PATH``,
+ you can enable enforcing those checks via:
+
+.. code-block:: bash
+
+ $ pre-commit install
+ pre-commit installed at .git/hooks/pre-commit
+
Exceptions to PEP8:
- Many unit tests use a helper method, ``_call_fut`` ("FUT" is short for
@@ -211,25 +202,24 @@ Supported Python Versions
We support:
-- `Python 3.5`_
- `Python 3.6`_
- `Python 3.7`_
- `Python 3.8`_
+- `Python 3.9`_
-.. _Python 3.5: https://docs.python.org/3.5/
.. _Python 3.6: https://docs.python.org/3.6/
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
+.. _Python 3.9: https://docs.python.org/3.9/
Supported versions can be found in our ``noxfile.py`` `config`_.
.. _config: https://github.com/googleapis/python-memcache/blob/master/noxfile.py
-Python 2.7 support is deprecated. All code changes should maintain Python 2.7 compatibility until January 1, 2020.
We also explicitly decided to support Python 3 beginning with version
-3.5. Reasons for this include:
+3.6. Reasons for this include:
- Encouraging use of newest versions of Python 3
- Taking the lead of `prominent`_ open-source `projects`_
diff --git a/MANIFEST.in b/MANIFEST.in
index 68855ab..e9e29d1 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -20,3 +20,6 @@ recursive-include google *.json *.proto
recursive-include tests *
global-exclude *.py[co]
global-exclude __pycache__
+
+# Exclude scripts for samples readmegen
+prune scripts/readme-gen
\ No newline at end of file
diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html
index 228529e..6316a53 100644
--- a/docs/_templates/layout.html
+++ b/docs/_templates/layout.html
@@ -21,8 +21,8 @@
- On January 1, 2020 this library will no longer support Python 2 on the latest released version.
- Previously released library versions will continue to be available. For more information please
+ As of January 1, 2020 this library no longer supports Python 2 on the latest released version.
+ Library versions released prior to that date will continue to be available. For more information please
visit
Python 2 support on Google Cloud.
{% block body %} {% endblock %}
diff --git a/docs/conf.py b/docs/conf.py
index 54a5d17..a84e615 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -20,12 +20,16 @@
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
+# For plugins that can not read conf.py.
+# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
+sys.path.insert(0, os.path.abspath("."))
+
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.6.3"
+needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
@@ -35,6 +39,7 @@
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
+ "sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
@@ -43,7 +48,7 @@
# autodoc/autosummary flags
autoclass_content = "both"
-autodoc_default_flags = ["members"]
+autodoc_default_options = {"members": True}
autosummary_generate = True
@@ -90,7 +95,12 @@
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-exclude_patterns = ["_build"]
+exclude_patterns = [
+ "_build",
+ "samples/AUTHORING_GUIDE.md",
+ "samples/CONTRIBUTING.md",
+ "samples/snippets/README.rst",
+]
# The reST default role (used for this markup: `text`) to use for all
# documents.
@@ -335,10 +345,11 @@
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
- "python": ("http://python.readthedocs.org/en/latest/", None),
- "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
- "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
- "grpc": ("https://grpc.io/grpc/python/", None),
+ "python": ("https://python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
+ "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
+ "grpc": ("https://grpc.github.io/grpc/python/", None),
+ "proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
}
diff --git a/docs/memcache_v1beta2/services.rst b/docs/memcache_v1beta2/services.rst
index 72a3e8a..6b2845e 100644
--- a/docs/memcache_v1beta2/services.rst
+++ b/docs/memcache_v1beta2/services.rst
@@ -1,6 +1,6 @@
-Client for Google Cloud Memcache API
-====================================
+Services for Google Cloud Memcache v1beta2 API
+==============================================
-.. automodule:: google.cloud.memcache_v1beta2
+.. automodule:: google.cloud.memcache_v1beta2.services.cloud_memcache
:members:
:inherited-members:
diff --git a/docs/memcache_v1beta2/types.rst b/docs/memcache_v1beta2/types.rst
index 746c77b..1b47aa6 100644
--- a/docs/memcache_v1beta2/types.rst
+++ b/docs/memcache_v1beta2/types.rst
@@ -1,5 +1,6 @@
-Types for Google Cloud Memcache API
-===================================
+Types for Google Cloud Memcache v1beta2 API
+===========================================
.. automodule:: google.cloud.memcache_v1beta2.types
:members:
+ :show-inheritance:
diff --git a/google/cloud/memcache/__init__.py b/google/cloud/memcache/__init__.py
index 11d5e0d..b66f063 100644
--- a/google/cloud/memcache/__init__.py
+++ b/google/cloud/memcache/__init__.py
@@ -15,7 +15,9 @@
# limitations under the License.
#
-
+from google.cloud.memcache_v1beta2.services.cloud_memcache.async_client import (
+ CloudMemcacheAsyncClient,
+)
from google.cloud.memcache_v1beta2.services.cloud_memcache.client import (
CloudMemcacheClient,
)
@@ -36,6 +38,7 @@
__all__ = (
"ApplyParametersRequest",
+ "CloudMemcacheAsyncClient",
"CloudMemcacheClient",
"CreateInstanceRequest",
"DeleteInstanceRequest",
diff --git a/google/cloud/memcache_v1beta2/__init__.py b/google/cloud/memcache_v1beta2/__init__.py
index 6c9725e..13cd09f 100644
--- a/google/cloud/memcache_v1beta2/__init__.py
+++ b/google/cloud/memcache_v1beta2/__init__.py
@@ -15,7 +15,6 @@
# limitations under the License.
#
-
from .services.cloud_memcache import CloudMemcacheClient
from .types.cloud_memcache import ApplyParametersRequest
from .types.cloud_memcache import CreateInstanceRequest
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py
index 0cc5e89..8524cb4 100644
--- a/google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/__init__.py
@@ -16,5 +16,9 @@
#
from .client import CloudMemcacheClient
+from .async_client import CloudMemcacheAsyncClient
-__all__ = ("CloudMemcacheClient",)
+__all__ = (
+ "CloudMemcacheClient",
+ "CloudMemcacheAsyncClient",
+)
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/async_client.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/async_client.py
new file mode 100644
index 0000000..9373a06
--- /dev/null
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/async_client.py
@@ -0,0 +1,844 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.memcache_v1beta2.services.cloud_memcache import pagers
+from google.cloud.memcache_v1beta2.types import cloud_memcache
+from google.protobuf import empty_pb2 as empty # type: ignore
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+from .transports.base import CloudMemcacheTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import CloudMemcacheGrpcAsyncIOTransport
+from .client import CloudMemcacheClient
+
+
+class CloudMemcacheAsyncClient:
+ """Configures and manages Cloud Memorystore for Memcached instances.
+
+ The ``memcache.googleapis.com`` service implements the Google Cloud
+ Memorystore for Memcached API and defines the following resource
+ model for managing Memorystore Memcached (also called Memcached
+ below) instances:
+
+ - The service works with a collection of cloud projects, named:
+ ``/projects/*``
+ - Each project has a collection of available locations, named:
+ ``/locations/*``
+ - Each location has a collection of Memcached instances, named:
+ ``/instances/*``
+ - As such, Memcached instances are resources of the form:
+ ``/projects/{project_id}/locations/{location_id}/instances/{instance_id}``
+
+    Note that location_id must be referring to a GCP ``region``; for
+ example:
+
+ - ``projects/my-memcached-project/locations/us-central1/instances/my-memcached``
+ """
+
+ _client: CloudMemcacheClient
+
+ DEFAULT_ENDPOINT = CloudMemcacheClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = CloudMemcacheClient.DEFAULT_MTLS_ENDPOINT
+
+ instance_path = staticmethod(CloudMemcacheClient.instance_path)
+ parse_instance_path = staticmethod(CloudMemcacheClient.parse_instance_path)
+
+ common_billing_account_path = staticmethod(
+ CloudMemcacheClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ CloudMemcacheClient.parse_common_billing_account_path
+ )
+
+ common_folder_path = staticmethod(CloudMemcacheClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ CloudMemcacheClient.parse_common_folder_path
+ )
+
+ common_organization_path = staticmethod(
+ CloudMemcacheClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ CloudMemcacheClient.parse_common_organization_path
+ )
+
+ common_project_path = staticmethod(CloudMemcacheClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ CloudMemcacheClient.parse_common_project_path
+ )
+
+ common_location_path = staticmethod(CloudMemcacheClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ CloudMemcacheClient.parse_common_location_path
+ )
+
+ from_service_account_file = CloudMemcacheClient.from_service_account_file
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> CloudMemcacheTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ CloudMemcacheTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(CloudMemcacheClient).get_transport_class, type(CloudMemcacheClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: credentials.Credentials = None,
+ transport: Union[str, CloudMemcacheTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the cloud memcache client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.CloudMemcacheTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+
+ self._client = CloudMemcacheClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def list_instances(
+ self,
+ request: cloud_memcache.ListInstancesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListInstancesAsyncPager:
+ r"""Lists Instances in a given project and location.
+
+ Args:
+ request (:class:`~.cloud_memcache.ListInstancesRequest`):
+ The request object. Request for
+ [ListInstances][google.cloud.memcache.v1beta2.CloudMemcache.ListInstances].
+ parent (:class:`str`):
+ Required. The resource name of the instance location
+ using the form:
+ ``projects/{project_id}/locations/{location_id}`` where
+ ``location_id`` refers to a GCP region
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.pagers.ListInstancesAsyncPager:
+ Response for
+ [ListInstances][google.cloud.memcache.v1beta2.CloudMemcache.ListInstances].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.ListInstancesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_instances,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListInstancesAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_instance(
+ self,
+ request: cloud_memcache.GetInstanceRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cloud_memcache.Instance:
+ r"""Gets details of a single Instance.
+
+ Args:
+ request (:class:`~.cloud_memcache.GetInstanceRequest`):
+ The request object. Request for
+ [GetInstance][google.cloud.memcache.v1beta2.CloudMemcache.GetInstance].
+ name (:class:`str`):
+ Required. Memcached instance resource name in the
+ format:
+ ``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
+ where ``location_id`` refers to a GCP region
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.cloud_memcache.Instance:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.GetInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_instance,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def create_instance(
+ self,
+ request: cloud_memcache.CreateInstanceRequest = None,
+ *,
+ parent: str = None,
+ instance_id: str = None,
+ resource: cloud_memcache.Instance = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a new Instance in a given project and
+ location.
+
+ Args:
+ request (:class:`~.cloud_memcache.CreateInstanceRequest`):
+ The request object. Request for
+ [CreateInstance][google.cloud.memcache.v1beta2.CloudMemcache.CreateInstance].
+ parent (:class:`str`):
+ Required. The resource name of the instance location
+ using the form:
+ ``projects/{project_id}/locations/{location_id}`` where
+ ``location_id`` refers to a GCP region
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ instance_id (:class:`str`):
+ Required. The logical name of the Memcached instance in
+ the user project with the following restrictions:
+
+ - Must contain only lowercase letters, numbers, and
+ hyphens.
+ - Must start with a letter.
+ - Must be between 1-40 characters.
+ - Must end with a number or a letter.
+ - Must be unique within the user project / location
+
+ This corresponds to the ``instance_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ resource (:class:`~.cloud_memcache.Instance`):
+ Required. A Memcached [Instance] resource
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+                :class:`~.cloud_memcache.Instance`:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, instance_id, resource])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.CreateInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if parent is not None:
+ request.parent = parent
+ if instance_id is not None:
+ request.instance_id = instance_id
+ if resource is not None:
+ request.resource = resource
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_instance,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ cloud_memcache.Instance,
+ metadata_type=cloud_memcache.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_instance(
+ self,
+ request: cloud_memcache.UpdateInstanceRequest = None,
+ *,
+ update_mask: field_mask.FieldMask = None,
+ resource: cloud_memcache.Instance = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates an existing Instance in a given project and
+ location.
+
+ Args:
+ request (:class:`~.cloud_memcache.UpdateInstanceRequest`):
+ The request object. Request for
+ [UpdateInstance][google.cloud.memcache.v1beta2.CloudMemcache.UpdateInstance].
+ update_mask (:class:`~.field_mask.FieldMask`):
+ Required. Mask of fields to update.
+
+ - ``displayName``
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ resource (:class:`~.cloud_memcache.Instance`):
+ Required. A Memcached [Instance] resource. Only fields
+ specified in update_mask are updated.
+ This corresponds to the ``resource`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+                :class:`~.cloud_memcache.Instance`:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([update_mask, resource])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.UpdateInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if update_mask is not None:
+ request.update_mask = update_mask
+ if resource is not None:
+ request.resource = resource
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.update_instance,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource.name", request.resource.name),)
+ ),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ cloud_memcache.Instance,
+ metadata_type=cloud_memcache.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_parameters(
+ self,
+ request: cloud_memcache.UpdateParametersRequest = None,
+ *,
+ name: str = None,
+ update_mask: field_mask.FieldMask = None,
+ parameters: cloud_memcache.MemcacheParameters = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Updates the defined Memcached Parameters for an
+ existing Instance. This method only stages the
+ parameters, it must be followed by ApplyParameters to
+ apply the parameters to nodes of the Memcached Instance.
+
+ Args:
+ request (:class:`~.cloud_memcache.UpdateParametersRequest`):
+ The request object. Request for
+ [UpdateParameters][google.cloud.memcache.v1beta2.CloudMemcache.UpdateParameters].
+ name (:class:`str`):
+ Required. Resource name of the
+ Memcached instance for which the
+ parameters should be updated.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`~.field_mask.FieldMask`):
+ Required. Mask of fields to update.
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ parameters (:class:`~.cloud_memcache.MemcacheParameters`):
+ The parameters to apply to the
+ instance.
+ This corresponds to the ``parameters`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+                :class:`~.cloud_memcache.Instance`:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, update_mask, parameters])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.UpdateParametersRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if update_mask is not None:
+ request.update_mask = update_mask
+ if parameters is not None:
+ request.parameters = parameters
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.update_parameters,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ cloud_memcache.Instance,
+ metadata_type=cloud_memcache.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_instance(
+ self,
+ request: cloud_memcache.DeleteInstanceRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Deletes a single Instance.
+
+ Args:
+ request (:class:`~.cloud_memcache.DeleteInstanceRequest`):
+ The request object. Request for
+ [DeleteInstance][google.cloud.memcache.v1beta2.CloudMemcache.DeleteInstance].
+ name (:class:`str`):
+ Memcached instance resource name in the format:
+ ``projects/{project_id}/locations/{location_id}/instances/{instance_id}``
+ where ``location_id`` refers to a GCP region
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+                :class:`~.empty.Empty`: A generic empty message that
+ you can re-use to avoid defining duplicated empty
+ messages in your APIs. A typical example is to use it as
+ the request or the response type of an API method. For
+ instance:
+
+ ::
+
+ service Foo {
+ rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+ }
+
+ The JSON representation for ``Empty`` is empty JSON
+ object ``{}``.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.DeleteInstanceRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_instance,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ empty.Empty,
+ metadata_type=cloud_memcache.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def apply_parameters(
+ self,
+ request: cloud_memcache.ApplyParametersRequest = None,
+ *,
+ name: str = None,
+ node_ids: Sequence[str] = None,
+ apply_all: bool = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""ApplyParameters will update current set of Parameters
+ to the set of specified nodes of the Memcached Instance.
+
+ Args:
+ request (:class:`~.cloud_memcache.ApplyParametersRequest`):
+ The request object. Request for
+ [ApplyParameters][google.cloud.memcache.v1beta2.CloudMemcache.ApplyParameters].
+ name (:class:`str`):
+ Required. Resource name of the
+ Memcached instance for which parameter
+ group updates should be applied.
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ node_ids (:class:`Sequence[str]`):
+ Nodes to which we should apply the
+ instance-level parameter group.
+ This corresponds to the ``node_ids`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ apply_all (:class:`bool`):
+ Whether to apply instance-level
+ parameter group to all nodes. If set to
+ true, will explicitly restrict users
+ from specifying any nodes, and apply
+ parameter group updates to all nodes
+ within the instance.
+ This corresponds to the ``apply_all`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+                :class:`~.cloud_memcache.Instance`:
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, node_ids, apply_all])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = cloud_memcache.ApplyParametersRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+
+ if name is not None:
+ request.name = name
+ if apply_all is not None:
+ request.apply_all = apply_all
+
+ if node_ids:
+ request.node_ids.extend(node_ids)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.apply_parameters,
+ default_timeout=1200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ cloud_memcache.Instance,
+ metadata_type=cloud_memcache.OperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-memcache",).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("CloudMemcacheAsyncClient",)
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/client.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/client.py
index 19c6bde..0a1ef7c 100644
--- a/google/cloud/memcache_v1beta2/services/cloud_memcache/client.py
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/client.py
@@ -16,26 +16,33 @@
#
from collections import OrderedDict
+from distutils import util
+import os
import re
-from typing import Callable, Dict, Sequence, Tuple, Type, Union
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
-import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
-from google.api_core import operation
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
from google.cloud.memcache_v1beta2.services.cloud_memcache import pagers
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.protobuf import empty_pb2 as empty # type: ignore
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-from .transports.base import CloudMemcacheTransport
+from .transports.base import CloudMemcacheTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import CloudMemcacheGrpcTransport
+from .transports.grpc_asyncio import CloudMemcacheGrpcAsyncIOTransport
class CloudMemcacheClientMeta(type):
@@ -48,8 +55,9 @@ class CloudMemcacheClientMeta(type):
_transport_registry = OrderedDict() # type: Dict[str, Type[CloudMemcacheTransport]]
_transport_registry["grpc"] = CloudMemcacheGrpcTransport
+ _transport_registry["grpc_asyncio"] = CloudMemcacheGrpcAsyncIOTransport
- def get_transport_class(cls, label: str = None) -> Type[CloudMemcacheTransport]:
+ def get_transport_class(cls, label: str = None,) -> Type[CloudMemcacheTransport]:
"""Return an appropriate transport class.
Args:
@@ -145,11 +153,20 @@ def from_service_account_file(cls, filename: str, *args, **kwargs):
from_service_account_json = from_service_account_file
+ @property
+ def transport(self) -> CloudMemcacheTransport:
+ """Return the transport used by the client instance.
+
+ Returns:
+ CloudMemcacheTransport: The transport used by the client instance.
+ """
+ return self._transport
+
@staticmethod
- def instance_path(project: str, location: str, instance: str) -> str:
+ def instance_path(project: str, location: str, instance: str,) -> str:
"""Return a fully-qualified instance string."""
return "projects/{project}/locations/{location}/instances/{instance}".format(
- project=project, location=location, instance=instance
+ project=project, location=location, instance=instance,
)
@staticmethod
@@ -161,12 +178,72 @@ def parse_instance_path(path: str) -> Dict[str, str]:
)
return m.groupdict() if m else {}
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Return a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Return a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Return a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Return a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Return a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
def __init__(
self,
*,
- credentials: credentials.Credentials = None,
- transport: Union[str, CloudMemcacheTransport] = None,
- client_options: ClientOptions = None,
+ credentials: Optional[credentials.Credentials] = None,
+ transport: Union[str, CloudMemcacheTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the cloud memcache client.
@@ -179,66 +256,102 @@ def __init__(
transport (Union[str, ~.CloudMemcacheTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
- client_options (ClientOptions): Custom options for the client.
+ client_options (client_options_lib.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
- default endpoint provided by the client.
- (2) If ``transport`` argument is None, ``client_options`` can be
- used to create a mutual TLS transport. If ``client_cert_source``
- is provided, mutual TLS transport will be created with the given
- ``api_endpoint`` or the default mTLS endpoint, and the client
- SSL credentials obtained from ``client_cert_source``.
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
Raises:
- google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
- client_options = ClientOptions.from_dict(client_options)
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ ssl_credentials = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ import grpc # type: ignore
+
+ cert, key = client_options.client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ is_mtls = True
+ else:
+ creds = SslCredentials()
+ is_mtls = creds.is_mtls
+ ssl_credentials = creds.ssl_credentials if is_mtls else None
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ api_endpoint = (
+ self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
+ )
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
+ )
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CloudMemcacheTransport):
# transport is a CloudMemcacheTransport instance.
- if credentials:
+ if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its scopes directly."
+ )
self._transport = transport
- elif client_options is None or (
- client_options.api_endpoint is None
- and client_options.client_cert_source is None
- ):
- # Don't trigger mTLS if we get an empty ClientOptions.
+ else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
- credentials=credentials, host=self.DEFAULT_ENDPOINT
- )
- else:
- # We have a non-empty ClientOptions. If client_cert_source is
- # provided, trigger mTLS with user provided endpoint or the default
- # mTLS endpoint.
- if client_options.client_cert_source:
- api_mtls_endpoint = (
- client_options.api_endpoint
- if client_options.api_endpoint
- else self.DEFAULT_MTLS_ENDPOINT
- )
- else:
- api_mtls_endpoint = None
-
- api_endpoint = (
- client_options.api_endpoint
- if client_options.api_endpoint
- else self.DEFAULT_ENDPOINT
- )
-
- self._transport = CloudMemcacheGrpcTransport(
credentials=credentials,
+ credentials_file=client_options.credentials_file,
host=api_endpoint,
- api_mtls_endpoint=api_mtls_endpoint,
- client_cert_source=client_options.client_cert_source,
+ scopes=client_options.scopes,
+ ssl_channel_credentials=ssl_credentials,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
)
def list_instances(
@@ -283,27 +396,29 @@ def list_instances(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent]):
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.ListInstancesRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.ListInstancesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.ListInstancesRequest):
+ request = cloud_memcache.ListInstancesRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if parent is not None:
- request.parent = parent
+ if parent is not None:
+ request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.list_instances,
- default_timeout=None,
- client_info=_client_info,
- )
+ rpc = self._transport._wrapped_methods[self._transport.list_instances]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -312,12 +427,12 @@ def list_instances(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListInstancesPager(
- method=rpc, request=request, response=response
+ method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
@@ -360,25 +475,29 @@ def get_instance(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.GetInstanceRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.GetInstanceRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.GetInstanceRequest):
+ request = cloud_memcache.GetInstanceRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if name is not None:
- request.name = name
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.get_instance, default_timeout=None, client_info=_client_info
- )
+ rpc = self._transport._wrapped_methods[self._transport.get_instance]
# Certain fields should be provided within the metadata header;
# add these here.
@@ -387,7 +506,7 @@ def get_instance(
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
@@ -455,34 +574,42 @@ def create_instance(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([parent, instance_id, resource]):
+ has_flattened_params = any([parent, instance_id, resource])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.CreateInstanceRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.CreateInstanceRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.CreateInstanceRequest):
+ request = cloud_memcache.CreateInstanceRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if parent is not None:
- request.parent = parent
- if instance_id is not None:
- request.instance_id = instance_id
- if resource is not None:
- request.resource = resource
+ if parent is not None:
+ request.parent = parent
+ if instance_id is not None:
+ request.instance_id = instance_id
+ if resource is not None:
+ request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.create_instance,
- default_timeout=None,
- client_info=_client_info,
+ rpc = self._transport._wrapped_methods[self._transport.create_instance]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -544,32 +671,42 @@ def update_instance(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([update_mask, resource]):
+ has_flattened_params = any([update_mask, resource])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.UpdateInstanceRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.UpdateInstanceRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.UpdateInstanceRequest):
+ request = cloud_memcache.UpdateInstanceRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if update_mask is not None:
- request.update_mask = update_mask
- if resource is not None:
- request.resource = resource
+ if update_mask is not None:
+ request.update_mask = update_mask
+ if resource is not None:
+ request.resource = resource
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.update_instance,
- default_timeout=None,
- client_info=_client_info,
+ rpc = self._transport._wrapped_methods[self._transport.update_instance]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(
+ (("resource.name", request.resource.name),)
+ ),
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -638,34 +775,42 @@ def update_parameters(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, update_mask, parameters]):
+ has_flattened_params = any([name, update_mask, parameters])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.UpdateParametersRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.UpdateParametersRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.UpdateParametersRequest):
+ request = cloud_memcache.UpdateParametersRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if name is not None:
- request.name = name
- if update_mask is not None:
- request.update_mask = update_mask
- if parameters is not None:
- request.parameters = parameters
+ if name is not None:
+ request.name = name
+ if update_mask is not None:
+ request.update_mask = update_mask
+ if parameters is not None:
+ request.parameters = parameters
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.update_parameters,
- default_timeout=None,
- client_info=_client_info,
+ rpc = self._transport._wrapped_methods[self._transport.update_parameters]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -731,30 +876,38 @@ def delete_instance(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name]):
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.DeleteInstanceRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.DeleteInstanceRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.DeleteInstanceRequest):
+ request = cloud_memcache.DeleteInstanceRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- if name is not None:
- request.name = name
+ if name is not None:
+ request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.delete_instance,
- default_timeout=None,
- client_info=_client_info,
+ rpc = self._transport._wrapped_methods[self._transport.delete_instance]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -826,34 +979,43 @@ def apply_parameters(
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
- if request is not None and any([name, node_ids, apply_all]):
+ has_flattened_params = any([name, node_ids, apply_all])
+ if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
- request = cloud_memcache.ApplyParametersRequest(request)
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cloud_memcache.ApplyParametersRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cloud_memcache.ApplyParametersRequest):
+ request = cloud_memcache.ApplyParametersRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
+ if name is not None:
+ request.name = name
+ if apply_all is not None:
+ request.apply_all = apply_all
- if name is not None:
- request.name = name
- if node_ids is not None:
- request.node_ids = node_ids
- if apply_all is not None:
- request.apply_all = apply_all
+ if node_ids:
+ request.node_ids.extend(node_ids)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
- rpc = gapic_v1.method.wrap_method(
- self._transport.apply_parameters,
- default_timeout=None,
- client_info=_client_info,
+ rpc = self._transport._wrapped_methods[self._transport.apply_parameters]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
- response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
@@ -868,11 +1030,11 @@ def apply_parameters(
try:
- _client_info = gapic_v1.client_info.ClientInfo(
- gapic_version=pkg_resources.get_distribution("google-cloud-memcache").version
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-memcache",).version,
)
except pkg_resources.DistributionNotFound:
- _client_info = gapic_v1.client_info.ClientInfo()
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CloudMemcacheClient",)
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/pagers.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/pagers.py
index be8dd7f..7e7696a 100644
--- a/google/cloud/memcache_v1beta2/services/cloud_memcache/pagers.py
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/pagers.py
@@ -15,7 +15,7 @@
# limitations under the License.
#
-from typing import Any, Callable, Iterable
+from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple
from google.cloud.memcache_v1beta2.types import cloud_memcache
@@ -40,11 +40,11 @@ class ListInstancesPager:
def __init__(
self,
- method: Callable[
- [cloud_memcache.ListInstancesRequest], cloud_memcache.ListInstancesResponse
- ],
+ method: Callable[..., cloud_memcache.ListInstancesResponse],
request: cloud_memcache.ListInstancesRequest,
response: cloud_memcache.ListInstancesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
@@ -55,10 +55,13 @@ def __init__(
The initial request object.
response (:class:`~.cloud_memcache.ListInstancesResponse`):
The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
"""
self._method = method
self._request = cloud_memcache.ListInstancesRequest(request)
self._response = response
+ self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@@ -68,7 +71,7 @@ def pages(self) -> Iterable[cloud_memcache.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
- self._response = self._method(self._request)
+ self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[cloud_memcache.Instance]:
@@ -77,3 +80,69 @@ def __iter__(self) -> Iterable[cloud_memcache.Instance]:
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListInstancesAsyncPager:
+ """A pager for iterating through ``list_instances`` requests.
+
+ This class thinly wraps an initial
+ :class:`~.cloud_memcache.ListInstancesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``resources`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListInstances`` requests and continue to iterate
+ through the ``resources`` field on the
+ corresponding responses.
+
+ All the usual :class:`~.cloud_memcache.ListInstancesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[cloud_memcache.ListInstancesResponse]],
+ request: cloud_memcache.ListInstancesRequest,
+ response: cloud_memcache.ListInstancesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (:class:`~.cloud_memcache.ListInstancesRequest`):
+ The initial request object.
+ response (:class:`~.cloud_memcache.ListInstancesResponse`):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = cloud_memcache.ListInstancesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterable[cloud_memcache.ListInstancesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterable[cloud_memcache.Instance]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.resources:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/__init__.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/__init__.py
index 33272b6..38122c6 100644
--- a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/__init__.py
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/__init__.py
@@ -20,11 +20,16 @@
from .base import CloudMemcacheTransport
from .grpc import CloudMemcacheGrpcTransport
+from .grpc_asyncio import CloudMemcacheGrpcAsyncIOTransport
# Compile a registry of transports.
_transport_registry = OrderedDict() # type: Dict[str, Type[CloudMemcacheTransport]]
_transport_registry["grpc"] = CloudMemcacheGrpcTransport
+_transport_registry["grpc_asyncio"] = CloudMemcacheGrpcAsyncIOTransport
-
-__all__ = ("CloudMemcacheTransport", "CloudMemcacheGrpcTransport")
+__all__ = (
+ "CloudMemcacheTransport",
+ "CloudMemcacheGrpcTransport",
+ "CloudMemcacheGrpcAsyncIOTransport",
+)
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/base.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/base.py
index e166ead..3a96e70 100644
--- a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/base.py
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/base.py
@@ -17,8 +17,12 @@
import abc
import typing
+import pkg_resources
-from google import auth
+from google import auth # type: ignore
+from google.api_core import exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
@@ -26,7 +30,15 @@
from google.longrunning import operations_pb2 as operations # type: ignore
-class CloudMemcacheTransport(metaclass=abc.ABCMeta):
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-memcache",).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class CloudMemcacheTransport(abc.ABC):
"""Abstract transport class for CloudMemcache."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
@@ -36,6 +48,11 @@ def __init__(
*,
host: str = "memcache.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: typing.Optional[str] = None,
+ scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
+ quota_project_id: typing.Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ **kwargs,
) -> None:
"""Instantiate the transport.
@@ -46,6 +63,17 @@ def __init__(
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scope (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
@@ -54,62 +82,125 @@ def __init__(
# If no credentials are provided, then determine the appropriate
# defaults.
- if credentials is None:
- credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
+ if credentials and credentials_file:
+ raise exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = auth.load_credentials_from_file(
+ credentials_file, scopes=scopes, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = auth.default(
+ scopes=scopes, quota_project_id=quota_project_id
+ )
# Save the credentials.
self._credentials = credentials
+ # Lifted into its own function so it can be stubbed out during tests.
+ self._prep_wrapped_messages(client_info)
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.list_instances: gapic_v1.method.wrap_method(
+ self.list_instances, default_timeout=1200.0, client_info=client_info,
+ ),
+ self.get_instance: gapic_v1.method.wrap_method(
+ self.get_instance, default_timeout=1200.0, client_info=client_info,
+ ),
+ self.create_instance: gapic_v1.method.wrap_method(
+ self.create_instance, default_timeout=1200.0, client_info=client_info,
+ ),
+ self.update_instance: gapic_v1.method.wrap_method(
+ self.update_instance, default_timeout=1200.0, client_info=client_info,
+ ),
+ self.update_parameters: gapic_v1.method.wrap_method(
+ self.update_parameters, default_timeout=1200.0, client_info=client_info,
+ ),
+ self.delete_instance: gapic_v1.method.wrap_method(
+ self.delete_instance, default_timeout=1200.0, client_info=client_info,
+ ),
+ self.apply_parameters: gapic_v1.method.wrap_method(
+ self.apply_parameters, default_timeout=1200.0, client_info=client_info,
+ ),
+ }
+
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
- raise NotImplementedError
+ raise NotImplementedError()
@property
def list_instances(
- self
+ self,
) -> typing.Callable[
- [cloud_memcache.ListInstancesRequest], cloud_memcache.ListInstancesResponse
+ [cloud_memcache.ListInstancesRequest],
+ typing.Union[
+ cloud_memcache.ListInstancesResponse,
+ typing.Awaitable[cloud_memcache.ListInstancesResponse],
+ ],
]:
- raise NotImplementedError
+ raise NotImplementedError()
@property
def get_instance(
- self
- ) -> typing.Callable[[cloud_memcache.GetInstanceRequest], cloud_memcache.Instance]:
- raise NotImplementedError
+ self,
+ ) -> typing.Callable[
+ [cloud_memcache.GetInstanceRequest],
+ typing.Union[
+ cloud_memcache.Instance, typing.Awaitable[cloud_memcache.Instance]
+ ],
+ ]:
+ raise NotImplementedError()
@property
def create_instance(
- self
- ) -> typing.Callable[[cloud_memcache.CreateInstanceRequest], operations.Operation]:
- raise NotImplementedError
+ self,
+ ) -> typing.Callable[
+ [cloud_memcache.CreateInstanceRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
@property
def update_instance(
- self
- ) -> typing.Callable[[cloud_memcache.UpdateInstanceRequest], operations.Operation]:
- raise NotImplementedError
+ self,
+ ) -> typing.Callable[
+ [cloud_memcache.UpdateInstanceRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
@property
def update_parameters(
- self
+ self,
) -> typing.Callable[
- [cloud_memcache.UpdateParametersRequest], operations.Operation
+ [cloud_memcache.UpdateParametersRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
]:
- raise NotImplementedError
+ raise NotImplementedError()
@property
def delete_instance(
- self
- ) -> typing.Callable[[cloud_memcache.DeleteInstanceRequest], operations.Operation]:
- raise NotImplementedError
+ self,
+ ) -> typing.Callable[
+ [cloud_memcache.DeleteInstanceRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
@property
def apply_parameters(
- self
- ) -> typing.Callable[[cloud_memcache.ApplyParametersRequest], operations.Operation]:
- raise NotImplementedError
+ self,
+ ) -> typing.Callable[
+ [cloud_memcache.ApplyParametersRequest],
+ typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
+ ]:
+ raise NotImplementedError()
__all__ = ("CloudMemcacheTransport",)
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc.py
index 0d09f79..09e1170 100644
--- a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc.py
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc.py
@@ -15,20 +15,22 @@
# limitations under the License.
#
-from typing import Callable, Dict, Tuple
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
-
import grpc # type: ignore
from google.cloud.memcache_v1beta2.types import cloud_memcache
from google.longrunning import operations_pb2 as operations # type: ignore
-from .base import CloudMemcacheTransport
+from .base import CloudMemcacheTransport, DEFAULT_CLIENT_INFO
class CloudMemcacheGrpcTransport(CloudMemcacheTransport):
@@ -63,14 +65,21 @@ class CloudMemcacheGrpcTransport(CloudMemcacheTransport):
top of HTTP/2); the ``grpcio`` package must be installed.
"""
+ _stubs: Dict[str, Callable]
+
def __init__(
self,
*,
host: str = "memcache.googleapis.com",
credentials: credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
- client_cert_source: Callable[[], Tuple[bytes, bytes]] = None
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
@@ -82,21 +91,39 @@ def __init__(
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
- api_mtls_endpoint (Optional[str]): The mutual TLS endpoint. If
- provided, it overrides the ``host`` argument and tries to create
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
- client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): A
- callback to provide client SSL certificate bytes and private key
- bytes, both in PEM format. It is ignored if ``api_mtls_endpoint``
- is None.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
Raises:
- google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
"""
+ self._ssl_channel_credentials = ssl_channel_credentials
+
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
@@ -104,13 +131,24 @@ def __init__(
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
+ self._ssl_channel_credentials = None
elif api_mtls_endpoint:
+ warnings.warn(
+ "api_mtls_endpoint and client_cert_source are deprecated",
+ DeprecationWarning,
+ )
+
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
@@ -122,56 +160,103 @@ def __init__(
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
- self._grpc_channel = grpc_helpers.create_channel(
+ self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
+ credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
- scopes=self.AUTH_SCOPES,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
)
- # Run the base constructor.
- super().__init__(host=host, credentials=credentials)
self._stubs = {} # type: Dict[str, Callable]
+ self._operations_client = None
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
@classmethod
def create_channel(
cls,
host: str = "memcache.googleapis.com",
credentials: credentials.Credentials = None,
- **kwargs
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
- address (Optionsl[str]): The host for the channel to use.
+ host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
"""
+ scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers.create_channel(
- host, credentials=credentials, scopes=cls.AUTH_SCOPES, **kwargs
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
- """Create the channel designed to connect to this service.
-
- This property caches on the instance; repeated calls return
- the same channel.
+ """Return the channel designed to connect to this service.
"""
- # Sanity check: Only create a new channel if we do not already
- # have one.
- if not hasattr(self, "_grpc_channel"):
- self._grpc_channel = self.create_channel(
- self._host, credentials=self._credentials
- )
-
- # Return the channel from cache.
return self._grpc_channel
@property
@@ -182,17 +267,15 @@ def operations_client(self) -> operations_v1.OperationsClient:
client.
"""
# Sanity check: Only create a new client if we do not already have one.
- if "operations_client" not in self.__dict__:
- self.__dict__["operations_client"] = operations_v1.OperationsClient(
- self.grpc_channel
- )
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
- return self.__dict__["operations_client"]
+ return self._operations_client
@property
def list_instances(
- self
+ self,
) -> Callable[
[cloud_memcache.ListInstancesRequest], cloud_memcache.ListInstancesResponse
]:
@@ -220,7 +303,7 @@ def list_instances(
@property
def get_instance(
- self
+ self,
) -> Callable[[cloud_memcache.GetInstanceRequest], cloud_memcache.Instance]:
r"""Return a callable for the get instance method over gRPC.
@@ -246,7 +329,7 @@ def get_instance(
@property
def create_instance(
- self
+ self,
) -> Callable[[cloud_memcache.CreateInstanceRequest], operations.Operation]:
r"""Return a callable for the create instance method over gRPC.
@@ -273,7 +356,7 @@ def create_instance(
@property
def update_instance(
- self
+ self,
) -> Callable[[cloud_memcache.UpdateInstanceRequest], operations.Operation]:
r"""Return a callable for the update instance method over gRPC.
@@ -300,7 +383,7 @@ def update_instance(
@property
def update_parameters(
- self
+ self,
) -> Callable[[cloud_memcache.UpdateParametersRequest], operations.Operation]:
r"""Return a callable for the update parameters method over gRPC.
@@ -329,7 +412,7 @@ def update_parameters(
@property
def delete_instance(
- self
+ self,
) -> Callable[[cloud_memcache.DeleteInstanceRequest], operations.Operation]:
r"""Return a callable for the delete instance method over gRPC.
@@ -355,7 +438,7 @@ def delete_instance(
@property
def apply_parameters(
- self
+ self,
) -> Callable[[cloud_memcache.ApplyParametersRequest], operations.Operation]:
r"""Return a callable for the apply parameters method over gRPC.
diff --git a/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc_asyncio.py b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc_asyncio.py
new file mode 100644
index 0000000..009acf5
--- /dev/null
+++ b/google/cloud/memcache_v1beta2/services/cloud_memcache/transports/grpc_asyncio.py
@@ -0,0 +1,486 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google import auth # type: ignore
+from google.auth import credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.memcache_v1beta2.types import cloud_memcache
+from google.longrunning import operations_pb2 as operations # type: ignore
+
+from .base import CloudMemcacheTransport, DEFAULT_CLIENT_INFO
+from .grpc import CloudMemcacheGrpcTransport
+
+
+class CloudMemcacheGrpcAsyncIOTransport(CloudMemcacheTransport):
+ """gRPC AsyncIO backend transport for CloudMemcache.
+
+ Configures and manages Cloud Memorystore for Memcached instances.
+
+ The ``memcache.googleapis.com`` service implements the Google Cloud
+ Memorystore for Memcached API and defines the following resource
+ model for managing Memorystore Memcached (also called Memcached
+ below) instances:
+
+ - The service works with a collection of cloud projects, named:
+ ``/projects/*``
+ - Each project has a collection of available locations, named:
+ ``/locations/*``
+ - Each location has a collection of Memcached instances, named:
+ ``/instances/*``
+ - As such, Memcached instances are resources of the form:
+ ``/projects/{project_id}/locations/{location_id}/instances/{instance_id}``
+
+ Note that location_id must be referring to a GCP ``region``; for
+ example:
+
+ - ``projects/my-memcached-project/locations/us-central1/instances/my-memcached``
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "memcache.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+ scopes = scopes or cls.AUTH_SCOPES
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "memcache.googleapis.com",
+ credentials: credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]): The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._ssl_channel_credentials = ssl_channel_credentials
+
+ if channel:
+ # Sanity check: Ensure that channel and credentials are not both
+ # provided.
+ credentials = False
+
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ elif api_mtls_endpoint:
+ warnings.warn(
+ "api_mtls_endpoint and client_cert_source are deprecated",
+ DeprecationWarning,
+ )
+
+ host = (
+ api_mtls_endpoint
+ if ":" in api_mtls_endpoint
+ else api_mtls_endpoint + ":443"
+ )
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ ssl_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ ssl_credentials = SslCredentials().ssl_credentials
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ self._ssl_channel_credentials = ssl_credentials
+ else:
+ host = host if ":" in host else host + ":443"
+
+ if credentials is None:
+ credentials, _ = auth.default(
+ scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
+ )
+
+ # create a new channel. The provided one is ignored.
+ self._grpc_channel = type(self).create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ ssl_credentials=ssl_channel_credentials,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Run the base constructor.
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes or self.AUTH_SCOPES,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ )
+
+ self._stubs = {}
+ self._operations_client = None
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def list_instances(
+ self,
+ ) -> Callable[
+ [cloud_memcache.ListInstancesRequest],
+ Awaitable[cloud_memcache.ListInstancesResponse],
+ ]:
+ r"""Return a callable for the list instances method over gRPC.
+
+ Lists Instances in a given project and location.
+
+ Returns:
+ Callable[[~.ListInstancesRequest],
+ Awaitable[~.ListInstancesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_instances" not in self._stubs:
+ self._stubs["list_instances"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/ListInstances",
+ request_serializer=cloud_memcache.ListInstancesRequest.serialize,
+ response_deserializer=cloud_memcache.ListInstancesResponse.deserialize,
+ )
+ return self._stubs["list_instances"]
+
+ @property
+ def get_instance(
+ self,
+ ) -> Callable[
+ [cloud_memcache.GetInstanceRequest], Awaitable[cloud_memcache.Instance]
+ ]:
+ r"""Return a callable for the get instance method over gRPC.
+
+ Gets details of a single Instance.
+
+ Returns:
+ Callable[[~.GetInstanceRequest],
+ Awaitable[~.Instance]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_instance" not in self._stubs:
+ self._stubs["get_instance"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/GetInstance",
+ request_serializer=cloud_memcache.GetInstanceRequest.serialize,
+ response_deserializer=cloud_memcache.Instance.deserialize,
+ )
+ return self._stubs["get_instance"]
+
+ @property
+ def create_instance(
+ self,
+ ) -> Callable[
+ [cloud_memcache.CreateInstanceRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the create instance method over gRPC.
+
+ Creates a new Instance in a given project and
+ location.
+
+ Returns:
+ Callable[[~.CreateInstanceRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_instance" not in self._stubs:
+ self._stubs["create_instance"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/CreateInstance",
+ request_serializer=cloud_memcache.CreateInstanceRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["create_instance"]
+
+ @property
+ def update_instance(
+ self,
+ ) -> Callable[
+ [cloud_memcache.UpdateInstanceRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the update instance method over gRPC.
+
+ Updates an existing Instance in a given project and
+ location.
+
+ Returns:
+ Callable[[~.UpdateInstanceRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_instance" not in self._stubs:
+ self._stubs["update_instance"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/UpdateInstance",
+ request_serializer=cloud_memcache.UpdateInstanceRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["update_instance"]
+
+ @property
+ def update_parameters(
+ self,
+ ) -> Callable[
+ [cloud_memcache.UpdateParametersRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the update parameters method over gRPC.
+
+ Updates the defined Memcached Parameters for an
+ existing Instance. This method only stages the
+ parameters, it must be followed by ApplyParameters to
+ apply the parameters to nodes of the Memcached Instance.
+
+ Returns:
+ Callable[[~.UpdateParametersRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "update_parameters" not in self._stubs:
+ self._stubs["update_parameters"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/UpdateParameters",
+ request_serializer=cloud_memcache.UpdateParametersRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["update_parameters"]
+
+ @property
+ def delete_instance(
+ self,
+ ) -> Callable[
+ [cloud_memcache.DeleteInstanceRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the delete instance method over gRPC.
+
+ Deletes a single Instance.
+
+ Returns:
+ Callable[[~.DeleteInstanceRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_instance" not in self._stubs:
+ self._stubs["delete_instance"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/DeleteInstance",
+ request_serializer=cloud_memcache.DeleteInstanceRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["delete_instance"]
+
+ @property
+ def apply_parameters(
+ self,
+ ) -> Callable[
+ [cloud_memcache.ApplyParametersRequest], Awaitable[operations.Operation]
+ ]:
+ r"""Return a callable for the apply parameters method over gRPC.
+
+ ApplyParameters will update current set of Parameters
+ to the set of specified nodes of the Memcached Instance.
+
+ Returns:
+ Callable[[~.ApplyParametersRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "apply_parameters" not in self._stubs:
+ self._stubs["apply_parameters"] = self.grpc_channel.unary_unary(
+ "/google.cloud.memcache.v1beta2.CloudMemcache/ApplyParameters",
+ request_serializer=cloud_memcache.ApplyParametersRequest.serialize,
+ response_deserializer=operations.Operation.FromString,
+ )
+ return self._stubs["apply_parameters"]
+
+
+__all__ = ("CloudMemcacheGrpcAsyncIOTransport",)
diff --git a/google/cloud/memcache_v1beta2/types/__init__.py b/google/cloud/memcache_v1beta2/types/__init__.py
index f28a4dc..8619a5a 100644
--- a/google/cloud/memcache_v1beta2/types/__init__.py
+++ b/google/cloud/memcache_v1beta2/types/__init__.py
@@ -29,9 +29,9 @@
OperationMetadata,
LocationMetadata,
ZoneMetadata,
+ MemcacheVersion,
)
-
__all__ = (
"Instance",
"ListInstancesRequest",
@@ -46,4 +46,5 @@
"OperationMetadata",
"LocationMetadata",
"ZoneMetadata",
+ "MemcacheVersion",
)
diff --git a/google/cloud/memcache_v1beta2/types/cloud_memcache.py b/google/cloud/memcache_v1beta2/types/cloud_memcache.py
index 5a0423f..36cd39e 100644
--- a/google/cloud/memcache_v1beta2/types/cloud_memcache.py
+++ b/google/cloud/memcache_v1beta2/types/cloud_memcache.py
@@ -75,8 +75,8 @@ class Instance(proto.Message):
resources
authorized_network (str):
Optional. The full name of the Google Compute Engine
- `network `__
- to which the instance is connected. If left unspecified, the
+ `network <https://cloud.google.com/vpc/docs/vpc>`__ to which
+ the instance is connected. If left unspecified, the
``default`` network will be used.
zones (Sequence[str]):
Optional. Zones where Memcached nodes should
@@ -148,6 +148,7 @@ class NodeConfig(proto.Message):
"""
cpu_count = proto.Field(proto.INT32, number=1)
+
memory_size_mb = proto.Field(proto.INT32, number=2)
class Node(proto.Message):
@@ -187,11 +188,16 @@ class State(proto.Enum):
UPDATING = 4
node_id = proto.Field(proto.STRING, number=1)
+
zone = proto.Field(proto.STRING, number=2)
- state = proto.Field(proto.ENUM, number=3, enum="Instance.Node.State")
+
+ state = proto.Field(proto.ENUM, number=3, enum="Instance.Node.State",)
+
host = proto.Field(proto.STRING, number=4)
+
port = proto.Field(proto.INT32, number=5)
- parameters = proto.Field(proto.MESSAGE, number=6, message="MemcacheParameters")
+
+ parameters = proto.Field(proto.MESSAGE, number=6, message="MemcacheParameters",)
class InstanceMessage(proto.Message):
r"""
@@ -210,26 +216,42 @@ class Code(proto.Enum):
CODE_UNSPECIFIED = 0
ZONE_DISTRIBUTION_UNBALANCED = 1
- code = proto.Field(proto.ENUM, number=1, enum="Instance.InstanceMessage.Code")
+ code = proto.Field(proto.ENUM, number=1, enum="Instance.InstanceMessage.Code",)
+
message = proto.Field(proto.STRING, number=2)
name = proto.Field(proto.STRING, number=1)
+
display_name = proto.Field(proto.STRING, number=2)
+
labels = proto.MapField(proto.STRING, proto.STRING, number=3)
+
authorized_network = proto.Field(proto.STRING, number=4)
+
zones = proto.RepeatedField(proto.STRING, number=5)
+
node_count = proto.Field(proto.INT32, number=6)
- node_config = proto.Field(proto.MESSAGE, number=7, message=NodeConfig)
- memcache_version = proto.Field(proto.ENUM, number=9, enum="MemcacheVersion")
- parameters = proto.Field(proto.MESSAGE, number=11, message="MemcacheParameters")
- memcache_nodes = proto.RepeatedField(proto.MESSAGE, number=12, message=Node)
- create_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp)
- update_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp)
- state = proto.Field(proto.ENUM, number=15, enum=State)
+
+ node_config = proto.Field(proto.MESSAGE, number=7, message=NodeConfig,)
+
+ memcache_version = proto.Field(proto.ENUM, number=9, enum="MemcacheVersion",)
+
+ parameters = proto.Field(proto.MESSAGE, number=11, message="MemcacheParameters",)
+
+ memcache_nodes = proto.RepeatedField(proto.MESSAGE, number=12, message=Node,)
+
+ create_time = proto.Field(proto.MESSAGE, number=13, message=timestamp.Timestamp,)
+
+ update_time = proto.Field(proto.MESSAGE, number=14, message=timestamp.Timestamp,)
+
+ state = proto.Field(proto.ENUM, number=15, enum=State,)
+
memcache_full_version = proto.Field(proto.STRING, number=18)
+
instance_messages = proto.RepeatedField(
- proto.MESSAGE, number=19, message=InstanceMessage
+ proto.MESSAGE, number=19, message=InstanceMessage,
)
+
discovery_endpoint = proto.Field(proto.STRING, number=20)
@@ -264,9 +286,13 @@ class ListInstancesRequest(proto.Message):
"""
parent = proto.Field(proto.STRING, number=1)
+
page_size = proto.Field(proto.INT32, number=2)
+
page_token = proto.Field(proto.STRING, number=3)
+
filter = proto.Field(proto.STRING, number=4)
+
order_by = proto.Field(proto.STRING, number=5)
@@ -294,8 +320,10 @@ class ListInstancesResponse(proto.Message):
def raw_page(self):
return self
- resources = proto.RepeatedField(proto.MESSAGE, number=1, message=Instance)
+ resources = proto.RepeatedField(proto.MESSAGE, number=1, message="Instance",)
+
next_page_token = proto.Field(proto.STRING, number=2)
+
unreachable = proto.RepeatedField(proto.STRING, number=3)
@@ -337,8 +365,10 @@ class CreateInstanceRequest(proto.Message):
"""
parent = proto.Field(proto.STRING, number=1)
+
instance_id = proto.Field(proto.STRING, number=2)
- resource = proto.Field(proto.MESSAGE, number=3, message=Instance)
+
+ resource = proto.Field(proto.MESSAGE, number=3, message="Instance",)
class UpdateInstanceRequest(proto.Message):
@@ -355,8 +385,9 @@ class UpdateInstanceRequest(proto.Message):
specified in update_mask are updated.
"""
- update_mask = proto.Field(proto.MESSAGE, number=1, message=field_mask.FieldMask)
- resource = proto.Field(proto.MESSAGE, number=2, message=Instance)
+ update_mask = proto.Field(proto.MESSAGE, number=1, message=field_mask.FieldMask,)
+
+ resource = proto.Field(proto.MESSAGE, number=2, message="Instance",)
class DeleteInstanceRequest(proto.Message):
@@ -394,7 +425,9 @@ class ApplyParametersRequest(proto.Message):
"""
name = proto.Field(proto.STRING, number=1)
+
node_ids = proto.RepeatedField(proto.STRING, number=2)
+
apply_all = proto.Field(proto.BOOL, number=3)
@@ -414,8 +447,10 @@ class UpdateParametersRequest(proto.Message):
"""
name = proto.Field(proto.STRING, number=1)
- update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask)
- parameters = proto.Field(proto.MESSAGE, number=3, message="MemcacheParameters")
+
+ update_mask = proto.Field(proto.MESSAGE, number=2, message=field_mask.FieldMask,)
+
+ parameters = proto.Field(proto.MESSAGE, number=3, message="MemcacheParameters",)
class MemcacheParameters(proto.Message):
@@ -435,6 +470,7 @@ class MemcacheParameters(proto.Message):
"""
id = proto.Field(proto.STRING, number=1)
+
params = proto.MapField(proto.STRING, proto.STRING, number=3)
@@ -464,12 +500,18 @@ class OperationMetadata(proto.Message):
API version used to start the operation.
"""
- create_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp)
- end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp)
+ create_time = proto.Field(proto.MESSAGE, number=1, message=timestamp.Timestamp,)
+
+ end_time = proto.Field(proto.MESSAGE, number=2, message=timestamp.Timestamp,)
+
target = proto.Field(proto.STRING, number=3)
+
verb = proto.Field(proto.STRING, number=4)
+
status_detail = proto.Field(proto.STRING, number=5)
+
cancel_requested = proto.Field(proto.BOOL, number=6)
+
api_version = proto.Field(proto.STRING, number=7)
@@ -486,7 +528,7 @@ class LocationMetadata(proto.Message):
"""
available_zones = proto.MapField(
- proto.STRING, proto.MESSAGE, number=1, message="ZoneMetadata"
+ proto.STRING, proto.MESSAGE, number=1, message="ZoneMetadata",
)
diff --git a/noxfile.py b/noxfile.py
index 8563032..8004482 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -23,14 +23,15 @@
import nox
-BLACK_VERSION = "black==19.3b0"
+BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
-if os.path.exists("samples"):
- BLACK_PATHS.append("samples")
+DEFAULT_PYTHON_VERSION = "3.8"
+SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
-@nox.session(python="3.7")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
@@ -38,7 +39,9 @@ def lint(session):
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
- session.run("black", "--check", *BLACK_PATHS)
+ session.run(
+ "black", "--check", *BLACK_PATHS,
+ )
session.run("flake8", "google", "tests")
@@ -53,10 +56,12 @@ def blacken(session):
check the state of the `gcp_ubuntu_config` we use for that Kokoro run.
"""
session.install(BLACK_VERSION)
- session.run("black", *BLACK_PATHS)
+ session.run(
+ "black", *BLACK_PATHS,
+ )
-@nox.session(python="3.7")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
@@ -65,16 +70,19 @@ def lint_setup_py(session):
def default(session):
# Install all test dependencies, then install this package in-place.
- session.install("mock", "pytest", "pytest-cov")
+ session.install("asyncmock", "pytest-asyncio")
+
+ session.install(
+ "mock", "pytest", "pytest-cov",
+ )
session.install("-e", ".")
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
- "--cov=google.cloud.memcache",
- "--cov=google.cloud",
- "--cov=tests.unit",
+ "--cov=google/cloud",
+ "--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
@@ -84,17 +92,21 @@ def default(session):
)
-@nox.session(python=["3.6", "3.7", "3.8"])
+@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
-@nox.session(python=["3.7"])
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
+
+ # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
+ if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
+ session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Sanity check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
@@ -110,7 +122,9 @@ def system(session):
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
- session.install("mock", "pytest", "google-cloud-testutils")
+ session.install(
+ "mock", "pytest", "google-cloud-testutils",
+ )
session.install("-e", ".")
# Run py.test against the system tests.
@@ -120,7 +134,7 @@ def system(session):
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
-@nox.session(python="3.7")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
@@ -133,7 +147,7 @@ def cover(session):
session.run("coverage", "erase")
-@nox.session(python="3.7")
+@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
@@ -153,3 +167,38 @@ def docs(session):
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def docfx(session):
+ """Build the docfx yaml files for this library."""
+
+ session.install("-e", ".")
+ # sphinx-docfx-yaml supports up to sphinx version 1.5.5.
+ # https://github.com/docascode/sphinx-docfx-yaml/issues/97
+ session.install("sphinx==1.5.5", "alabaster", "recommonmark", "sphinx-docfx-yaml")
+
+ shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
+ session.run(
+ "sphinx-build",
+ "-T", # show full traceback on exception
+ "-N", # no colors
+ "-D",
+ (
+ "extensions=sphinx.ext.autodoc,"
+ "sphinx.ext.autosummary,"
+ "docfx_yaml.extension,"
+ "sphinx.ext.intersphinx,"
+ "sphinx.ext.coverage,"
+ "sphinx.ext.napoleon,"
+ "sphinx.ext.todo,"
+ "sphinx.ext.viewcode,"
+ "recommonmark"
+ ),
+ "-b",
+ "html",
+ "-d",
+ os.path.join("docs", "_build", "doctrees", ""),
+ os.path.join("docs", ""),
+ os.path.join("docs", "_build", "html", ""),
+ )
diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh
new file mode 100755
index 0000000..21f6d2a
--- /dev/null
+++ b/scripts/decrypt-secrets.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+ROOT=$( dirname "$DIR" )
+
+# Work from the project root.
+cd $ROOT
+
+# Prevent it from overriding files.
+# We recommend that sample authors use their own service account files and cloud project.
+# In that case, they are supposed to prepare these files by themselves.
+if [[ -f "testing/test-env.sh" ]] || \
+ [[ -f "testing/service-account.json" ]] || \
+ [[ -f "testing/client-secrets.json" ]]; then
+ echo "One or more target files exist, aborting."
+ exit 1
+fi
+
+# Use SECRET_MANAGER_PROJECT if set, fallback to cloud-devrel-kokoro-resources.
+PROJECT_ID="${SECRET_MANAGER_PROJECT:-cloud-devrel-kokoro-resources}"
+
+gcloud secrets versions access latest --secret="python-docs-samples-test-env" \
+ --project="${PROJECT_ID}" \
+ > testing/test-env.sh
+gcloud secrets versions access latest \
+ --secret="python-docs-samples-service-account" \
+ --project="${PROJECT_ID}" \
+ > testing/service-account.json
+gcloud secrets versions access latest \
+ --secret="python-docs-samples-client-secrets" \
+ --project="${PROJECT_ID}" \
+ > testing/client-secrets.json
diff --git a/scripts/fixup_memcache_v1beta2_keywords.py b/scripts/fixup_memcache_v1beta2_keywords.py
new file mode 100644
index 0000000..459ba63
--- /dev/null
+++ b/scripts/fixup_memcache_v1beta2_keywords.py
@@ -0,0 +1,185 @@
+#! /usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import argparse
+import os
+import libcst as cst
+import pathlib
+import sys
+from typing import (Any, Callable, Dict, List, Sequence, Tuple)
+
+
+def partition(
+ predicate: Callable[[Any], bool],
+ iterator: Sequence[Any]
+) -> Tuple[List[Any], List[Any]]:
+ """A stable, out-of-place partition."""
+ results = ([], [])
+
+ for i in iterator:
+ results[int(predicate(i))].append(i)
+
+ # Returns trueList, falseList
+ return results[1], results[0]
+
+
+class memcacheCallTransformer(cst.CSTTransformer):
+ CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
+ METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
+ 'apply_parameters': ('name', 'node_ids', 'apply_all', ),
+ 'create_instance': ('parent', 'instance_id', 'resource', ),
+ 'delete_instance': ('name', ),
+ 'get_instance': ('name', ),
+ 'list_instances': ('parent', 'page_size', 'page_token', 'filter', 'order_by', ),
+ 'update_instance': ('update_mask', 'resource', ),
+ 'update_parameters': ('name', 'update_mask', 'parameters', ),
+
+ }
+
+ def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
+ try:
+ key = original.func.attr.value
+ kword_params = self.METHOD_TO_PARAMS[key]
+ except (AttributeError, KeyError):
+ # Either not a method from the API or too convoluted to be sure.
+ return updated
+
+ # If the existing code is valid, keyword args come after positional args.
+ # Therefore, all positional args must map to the first parameters.
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+ if any(k.keyword.value == "request" for k in kwargs):
+ # We've already fixed this file, don't fix it again.
+ return updated
+
+ kwargs, ctrl_kwargs = partition(
+ lambda a: not a.keyword.value in self.CTRL_PARAMS,
+ kwargs
+ )
+
+ args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+ ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+ for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+ request_arg = cst.Arg(
+ value=cst.Dict([
+ cst.DictElement(
+ cst.SimpleString("'{}'".format(name)),
+ cst.Element(value=arg.value)
+ )
+ # Note: the args + kwargs looks silly, but keep in mind that
+ # the control parameters had to be stripped out, and that
+ # those could have been passed positionally or by keyword.
+ for name, arg in zip(kword_params, args + kwargs)]),
+ keyword=cst.Name("request")
+ )
+
+ return updated.with_changes(
+ args=[request_arg] + ctrl_kwargs
+ )
+
+
+def fix_files(
+ in_dir: pathlib.Path,
+ out_dir: pathlib.Path,
+ *,
+ transformer=memcacheCallTransformer(),
+):
+ """Duplicate the input dir to the output dir, fixing file method calls.
+
+ Preconditions:
+ * in_dir is a real directory
+ * out_dir is a real, empty directory
+ """
+ pyfile_gen = (
+ pathlib.Path(os.path.join(root, f))
+ for root, _, files in os.walk(in_dir)
+ for f in files if os.path.splitext(f)[1] == ".py"
+ )
+
+ for fpath in pyfile_gen:
+ with open(fpath, 'r') as f:
+ src = f.read()
+
+ # Parse the code and insert method call fixes.
+ tree = cst.parse_module(src)
+ updated = tree.visit(transformer)
+
+ # Create the path and directory structure for the new file.
+ updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+ updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Generate the updated source file at the corresponding path.
+ with open(updated_path, 'w') as f:
+ f.write(updated.code)
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(
+ description="""Fix up source that uses the memcache client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool operates at a best-effort level at converting positional
+ parameters in client method calls to keyword based parameters.
+ Cases where it WILL FAIL include
+ A) * or ** expansion in a method call.
+ B) Calls via function or method alias (includes free function calls)
+ C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+ These all constitute false negatives. The tool will also detect false
+ positives when an API method shares a name with another method.
+""")
+ parser.add_argument(
+ '-d',
+ '--input-directory',
+ required=True,
+ dest='input_dir',
+ help='the input directory to walk for python files to fix up',
+ )
+ parser.add_argument(
+ '-o',
+ '--output-directory',
+ required=True,
+ dest='output_dir',
+ help='the directory to output files fixed via un-flattening',
+ )
+ args = parser.parse_args()
+ input_dir = pathlib.Path(args.input_dir)
+ output_dir = pathlib.Path(args.output_dir)
+ if not input_dir.is_dir():
+ print(
+ f"input directory '{input_dir}' does not exist or is not a directory",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
+ if not output_dir.is_dir():
+ print(
+ f"output directory '{output_dir}' does not exist or is not a directory",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
+ if os.listdir(output_dir):
+ print(
+ f"output directory '{output_dir}' is not empty",
+ file=sys.stderr,
+ )
+ sys.exit(-1)
+
+ fix_files(input_dir, output_dir)
diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py
new file mode 100644
index 0000000..d309d6e
--- /dev/null
+++ b/scripts/readme-gen/readme_gen.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Generates READMEs using configuration defined in yaml."""
+
+import argparse
+import io
+import os
+import subprocess
+
+import jinja2
+import yaml
+
+
+jinja_env = jinja2.Environment(
+ trim_blocks=True,
+ loader=jinja2.FileSystemLoader(
+ os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))))
+
+README_TMPL = jinja_env.get_template('README.tmpl.rst')
+
+
+def get_help(file):
+ return subprocess.check_output(['python', file, '--help']).decode()
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('source')
+ parser.add_argument('--destination', default='README.rst')
+
+ args = parser.parse_args()
+
+ source = os.path.abspath(args.source)
+ root = os.path.dirname(source)
+ destination = os.path.join(root, args.destination)
+
+ jinja_env.globals['get_help'] = get_help
+
+ with io.open(source, 'r') as f:
+ config = yaml.load(f)
+
+ # This allows get_help to execute in the right directory.
+ os.chdir(root)
+
+ output = README_TMPL.render(config)
+
+ with io.open(destination, 'w') as f:
+ f.write(output)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/readme-gen/templates/README.tmpl.rst b/scripts/readme-gen/templates/README.tmpl.rst
new file mode 100644
index 0000000..4fd2397
--- /dev/null
+++ b/scripts/readme-gen/templates/README.tmpl.rst
@@ -0,0 +1,87 @@
+{# The following line is a lie. BUT! Once jinja2 is done with it, it will
+ become truth! #}
+.. This file is automatically generated. Do not edit this file directly.
+
+{{product.name}} Python Samples
+===============================================================================
+
+.. image:: https://gstatic.com/cloudssh/images/open-btn.png
+ :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/README.rst
+
+
+This directory contains samples for {{product.name}}. {{product.description}}
+
+{{description}}
+
+.. _{{product.name}}: {{product.url}}
+
+{% if required_api_url %}
+To run the sample, you need to enable the API at: {{required_api_url}}
+{% endif %}
+
+{% if required_role %}
+To run the sample, you need to have `{{required_role}}` role.
+{% endif %}
+
+{{other_required_steps}}
+
+{% if setup %}
+Setup
+-------------------------------------------------------------------------------
+
+{% for section in setup %}
+
+{% include section + '.tmpl.rst' %}
+
+{% endfor %}
+{% endif %}
+
+{% if samples %}
+Samples
+-------------------------------------------------------------------------------
+
+{% for sample in samples %}
+{{sample.name}}
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+{% if not sample.hide_cloudshell_button %}
+.. image:: https://gstatic.com/cloudssh/images/open-btn.png
+ :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor={{folder}}/{{sample.file}},{{folder}}/README.rst
+{% endif %}
+
+
+{{sample.description}}
+
+To run this sample:
+
+.. code-block:: bash
+
+ $ python {{sample.file}}
+{% if sample.show_help %}
+
+ {{get_help(sample.file)|indent}}
+{% endif %}
+
+
+{% endfor %}
+{% endif %}
+
+{% if cloud_client_library %}
+
+The client library
+-------------------------------------------------------------------------------
+
+This sample uses the `Google Cloud Client Library for Python`_.
+You can read the documentation for more details on API usage and use GitHub
+to `browse the source`_ and `report issues`_.
+
+.. _Google Cloud Client Library for Python:
+ https://googlecloudplatform.github.io/google-cloud-python/
+.. _browse the source:
+ https://github.com/GoogleCloudPlatform/google-cloud-python
+.. _report issues:
+ https://github.com/GoogleCloudPlatform/google-cloud-python/issues
+
+{% endif %}
+
+.. _Google Cloud SDK: https://cloud.google.com/sdk/
\ No newline at end of file
diff --git a/scripts/readme-gen/templates/auth.tmpl.rst b/scripts/readme-gen/templates/auth.tmpl.rst
new file mode 100644
index 0000000..1446b94
--- /dev/null
+++ b/scripts/readme-gen/templates/auth.tmpl.rst
@@ -0,0 +1,9 @@
+Authentication
+++++++++++++++
+
+This sample requires you to have authentication setup. Refer to the
+`Authentication Getting Started Guide`_ for instructions on setting up
+credentials for applications.
+
+.. _Authentication Getting Started Guide:
+ https://cloud.google.com/docs/authentication/getting-started
diff --git a/scripts/readme-gen/templates/auth_api_key.tmpl.rst b/scripts/readme-gen/templates/auth_api_key.tmpl.rst
new file mode 100644
index 0000000..11957ce
--- /dev/null
+++ b/scripts/readme-gen/templates/auth_api_key.tmpl.rst
@@ -0,0 +1,14 @@
+Authentication
+++++++++++++++
+
+Authentication for this service is done via an `API Key`_. To obtain an API
+Key:
+
+1. Open the `Cloud Platform Console`_
+2. Make sure that billing is enabled for your project.
+3. From the **Credentials** page, create a new **API Key** or use an existing
+ one for your project.
+
+.. _API Key:
+ https://developers.google.com/api-client-library/python/guide/aaa_apikeys
+.. _Cloud Platform Console: https://console.cloud.google.com/project?_
diff --git a/scripts/readme-gen/templates/install_deps.tmpl.rst b/scripts/readme-gen/templates/install_deps.tmpl.rst
new file mode 100644
index 0000000..a0406db
--- /dev/null
+++ b/scripts/readme-gen/templates/install_deps.tmpl.rst
@@ -0,0 +1,29 @@
+Install Dependencies
+++++++++++++++++++++
+
+#. Clone python-docs-samples and change directory to the sample directory you want to use.
+
+ .. code-block:: bash
+
+ $ git clone https://github.com/GoogleCloudPlatform/python-docs-samples.git
+
+#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions.
+
+ .. _Python Development Environment Setup Guide:
+ https://cloud.google.com/python/setup
+
+#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+.
+
+ .. code-block:: bash
+
+ $ virtualenv env
+ $ source env/bin/activate
+
+#. Install the dependencies needed to run the samples.
+
+ .. code-block:: bash
+
+ $ pip install -r requirements.txt
+
+.. _pip: https://pip.pypa.io/
+.. _virtualenv: https://virtualenv.pypa.io/
diff --git a/scripts/readme-gen/templates/install_portaudio.tmpl.rst b/scripts/readme-gen/templates/install_portaudio.tmpl.rst
new file mode 100644
index 0000000..5ea33d1
--- /dev/null
+++ b/scripts/readme-gen/templates/install_portaudio.tmpl.rst
@@ -0,0 +1,35 @@
+Install PortAudio
++++++++++++++++++
+
+Install `PortAudio`_. This is required by the `PyAudio`_ library to stream
+audio from your computer's microphone. PyAudio depends on PortAudio for cross-platform compatibility, and is installed differently depending on the
+platform.
+
+* For Mac OS X, you can use `Homebrew`_::
+
+ brew install portaudio
+
+ **Note**: if you encounter an error when running `pip install` that indicates
+ it can't find `portaudio.h`, try running `pip install` with the following
+ flags::
+
+ pip install --global-option='build_ext' \
+ --global-option='-I/usr/local/include' \
+ --global-option='-L/usr/local/lib' \
+ pyaudio
+
+* For Debian / Ubuntu Linux::
+
+ apt-get install portaudio19-dev python-all-dev
+
+* Windows may work without having to install PortAudio explicitly (it will get
+ installed with PyAudio).
+
+For more details, see the `PyAudio installation`_ page.
+
+
+.. _PyAudio: https://people.csail.mit.edu/hubert/pyaudio/
+.. _PortAudio: http://www.portaudio.com/
+.. _PyAudio installation:
+ https://people.csail.mit.edu/hubert/pyaudio/#downloads
+.. _Homebrew: http://brew.sh
diff --git a/setup.py b/setup.py
index 31d56ba..bae28ee 100644
--- a/setup.py
+++ b/setup.py
@@ -40,8 +40,8 @@
platforms="Posix; MacOS X; Windows",
include_package_data=True,
install_requires=(
- "google-api-core[grpc] >= 1.17.0, < 2.0.0dev",
- "proto-plus >= 0.4.0",
+ "google-api-core[grpc] >= 1.22.2, < 2.0.0dev",
+ "proto-plus >= 1.4.0",
),
python_requires=">=3.6",
setup_requires=["libcst >= 0.2.5"],
diff --git a/synth.metadata b/synth.metadata
index 55c0f24..b83959d 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -3,23 +3,22 @@
{
"git": {
"name": ".",
- "remote": "https://github.com/googleapis/python-memcache.git",
- "sha": "e34e2b1a2b47476cb6a0dcd932dcfd030018936f"
+ "remote": "git@github.com:googleapis/python-memcache",
+ "sha": "b068bfca843c0d792bb2b79f5b6b28fcc80ae7c8"
}
},
{
"git": {
- "name": "googleapis",
- "remote": "https://github.com/googleapis/googleapis.git",
- "sha": "eafa840ceec23b44a5c21670288107c661252711",
- "internalRef": "313488995"
+ "name": "synthtool",
+ "remote": "https://github.com/googleapis/synthtool.git",
+ "sha": "6b026e1443948dcfc0b9e3289c85e940eb70f694"
}
},
{
"git": {
"name": "synthtool",
"remote": "https://github.com/googleapis/synthtool.git",
- "sha": "71b8a272549c06b5768d00fa48d3ae990e871bec"
+ "sha": "6b026e1443948dcfc0b9e3289c85e940eb70f694"
}
}
],
@@ -30,7 +29,7 @@
"apiName": "memcache",
"apiVersion": "v1beta2",
"language": "python",
- "generator": "gapic-generator-python"
+ "generator": "bazel"
}
}
]
diff --git a/synth.py b/synth.py
index 619b015..4b2ffa1 100644
--- a/synth.py
+++ b/synth.py
@@ -19,53 +19,38 @@
import synthtool.gcp as gcp
from synthtool.languages import python
-gapic = gcp.GAPICMicrogenerator()
+gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
+versions = ["v1beta2"]
+
# ----------------------------------------------------------------------------
# Generate memcache GAPIC layer
# ----------------------------------------------------------------------------
-library = gapic.py_library("memcache", "v1beta2")
-
-# TODO: remove /docs/memcache_v1beta2/*.rst files after fix is released in
-# gapic-generator-python 0.19.0
-excludes = [
- "setup.py",
- "docs/index.rst",
- "docs/memcache_v1beta2/services.rst",
- "docs/memcache_v1beta2/types.rst",
-]
-s.move(library, excludes=excludes)
+for version in versions:
+ library = gapic.py_library(
+ service="memcache",
+ version=version,
+ bazel_target=f"//google/cloud/memcache/{version}:memcache-{version}-py")
+
+ excludes = [
+ "setup.py",
+ "docs/index.rst",
+ ]
+ s.move(library, excludes=excludes)
# Add extra linebreak after bulleted list to appease sphinx
-s.replace("google/**/client.py", """- Must be unique within the user project / location""",
+s.replace("google/**/*client.py", """- Must be unique within the user project / location""",
"""- Must be unique within the user project / location\n""")
-s.replace("google/**/client.py", "- ``displayName``", "- ``displayName``\n")
+s.replace("google/**/*client.py", "- ``displayName``", "- ``displayName``\n")
-# correct license headers
-python.fix_pb2_headers()
-python.fix_pb2_grpc_headers()
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
-templated_files = common.py_library(cov_level=100)
+templated_files = common.py_library(cov_level=100, microgenerator=True)
s.move(
templated_files, excludes=[".coveragerc"]
) # the microgenerator has a good coveragerc file
-s.replace(
- ".gitignore", "bigquery/docs/generated", "htmlcov"
-) # temporary hack to ignore htmlcov
-
-# Remove 2.7 and 3.5 tests from noxfile.py
-s.replace("noxfile.py", """\["2\.7", """, "[")
-s.replace("noxfile.py", """"3.5", """, "")
-
-# Expand flake errors permitted to accomodate the Microgenerator
-# TODO: remove extra error codes once issues below are resolved
-# E712: https://github.com/googleapis/gapic-generator-python/issues/322
-# F401: https://github.com/googleapis/gapic-generator-python/issues/324
-# F841: https://github.com/googleapis/gapic-generator-python/issues/323
-s.replace(".flake8", "ignore = .*", "ignore = E203, E266, E501, W503, F401, F841, E712")
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
diff --git a/testing/.gitignore b/testing/.gitignore
new file mode 100644
index 0000000..b05fbd6
--- /dev/null
+++ b/testing/.gitignore
@@ -0,0 +1,3 @@
+test-env.sh
+service-account.json
+client-secrets.json
\ No newline at end of file
diff --git a/tests/unit/gapic/memcache_v1beta2/__init__.py b/tests/unit/gapic/memcache_v1beta2/__init__.py
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/tests/unit/gapic/memcache_v1beta2/__init__.py
@@ -0,0 +1 @@
+
diff --git a/tests/unit/gapic/memcache_v1beta2/test_cloud_memcache.py b/tests/unit/gapic/memcache_v1beta2/test_cloud_memcache.py
new file mode 100644
index 0000000..3050c18
--- /dev/null
+++ b/tests/unit/gapic/memcache_v1beta2/test_cloud_memcache.py
@@ -0,0 +1,2529 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from google import auth
+from google.api_core import client_options
+from google.api_core import exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.auth import credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.memcache_v1beta2.services.cloud_memcache import (
+ CloudMemcacheAsyncClient,
+)
+from google.cloud.memcache_v1beta2.services.cloud_memcache import CloudMemcacheClient
+from google.cloud.memcache_v1beta2.services.cloud_memcache import pagers
+from google.cloud.memcache_v1beta2.services.cloud_memcache import transports
+from google.cloud.memcache_v1beta2.types import cloud_memcache
+from google.longrunning import operations_pb2
+from google.oauth2 import service_account
+from google.protobuf import field_mask_pb2 as field_mask # type: ignore
+from google.protobuf import timestamp_pb2 as timestamp # type: ignore
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert CloudMemcacheClient._get_default_mtls_endpoint(None) is None
+ assert (
+ CloudMemcacheClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ CloudMemcacheClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ CloudMemcacheClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ CloudMemcacheClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ CloudMemcacheClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [CloudMemcacheClient, CloudMemcacheAsyncClient]
+)
+def test_cloud_memcache_client_from_service_account_file(client_class):
+ creds = credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file("dummy/file/path.json")
+ assert client.transport._credentials == creds
+
+ client = client_class.from_service_account_json("dummy/file/path.json")
+ assert client.transport._credentials == creds
+
+ assert client.transport._host == "memcache.googleapis.com:443"
+
+
+def test_cloud_memcache_client_get_transport_class():
+ transport = CloudMemcacheClient.get_transport_class()
+ assert transport == transports.CloudMemcacheGrpcTransport
+
+ transport = CloudMemcacheClient.get_transport_class("grpc")
+ assert transport == transports.CloudMemcacheGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc"),
+ (
+ CloudMemcacheAsyncClient,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+@mock.patch.object(
+ CloudMemcacheClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(CloudMemcacheClient),
+)
+@mock.patch.object(
+ CloudMemcacheAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(CloudMemcacheAsyncClient),
+)
+def test_cloud_memcache_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(CloudMemcacheClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(CloudMemcacheClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError):
+ client = client_class()
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ client = client_class()
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc", "true"),
+ (
+ CloudMemcacheAsyncClient,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc", "false"),
+ (
+ CloudMemcacheAsyncClient,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ CloudMemcacheClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(CloudMemcacheClient),
+)
+@mock.patch.object(
+ CloudMemcacheAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(CloudMemcacheAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_cloud_memcache_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ ssl_channel_creds = mock.Mock()
+ with mock.patch(
+ "grpc.ssl_channel_credentials", return_value=ssl_channel_creds
+ ):
+ patched.return_value = None
+ client = client_class(client_options=options)
+
+ if use_client_cert_env == "false":
+ expected_ssl_channel_creds = None
+ expected_host = client.DEFAULT_ENDPOINT
+ else:
+ expected_ssl_channel_creds = ssl_channel_creds
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ ssl_channel_credentials=expected_ssl_channel_creds,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
+ ):
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.is_mtls",
+ new_callable=mock.PropertyMock,
+ ) as is_mtls_mock:
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.ssl_credentials",
+ new_callable=mock.PropertyMock,
+ ) as ssl_credentials_mock:
+ if use_client_cert_env == "false":
+ is_mtls_mock.return_value = False
+ ssl_credentials_mock.return_value = None
+ expected_host = client.DEFAULT_ENDPOINT
+ expected_ssl_channel_creds = None
+ else:
+ is_mtls_mock.return_value = True
+ ssl_credentials_mock.return_value = mock.Mock()
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_ssl_channel_creds = (
+ ssl_credentials_mock.return_value
+ )
+
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ ssl_channel_credentials=expected_ssl_channel_creds,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.__init__", return_value=None
+ ):
+ with mock.patch(
+ "google.auth.transport.grpc.SslCredentials.is_mtls",
+ new_callable=mock.PropertyMock,
+ ) as is_mtls_mock:
+ is_mtls_mock.return_value = False
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc"),
+ (
+ CloudMemcacheAsyncClient,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_cloud_memcache_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(scopes=["1", "2"],)
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=["1", "2"],
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (CloudMemcacheClient, transports.CloudMemcacheGrpcTransport, "grpc"),
+ (
+ CloudMemcacheAsyncClient,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_cloud_memcache_client_client_options_credentials_file(
+ client_class, transport_class, transport_name
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+
+def test_cloud_memcache_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = CloudMemcacheClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ ssl_channel_credentials=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ )
+
+
+def test_list_instances(
+ transport: str = "grpc", request_type=cloud_memcache.ListInstancesRequest
+):
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cloud_memcache.ListInstancesResponse(
+ next_page_token="next_page_token_value", unreachable=["unreachable_value"],
+ )
+
+ response = client.list_instances(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cloud_memcache.ListInstancesRequest()
+
+ # Establish that the response is the type that we expect.
+
+ assert isinstance(response, pagers.ListInstancesPager)
+
+ assert response.next_page_token == "next_page_token_value"
+
+ assert response.unreachable == ["unreachable_value"]
+
+
+def test_list_instances_from_dict():
+ test_list_instances(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_list_instances_async(
+ transport: str = "grpc_asyncio", request_type=cloud_memcache.ListInstancesRequest
+):
+ client = CloudMemcacheAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ cloud_memcache.ListInstancesResponse(
+ next_page_token="next_page_token_value",
+ unreachable=["unreachable_value"],
+ )
+ )
+
+ response = await client.list_instances(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cloud_memcache.ListInstancesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListInstancesAsyncPager)
+
+ assert response.next_page_token == "next_page_token_value"
+
+ assert response.unreachable == ["unreachable_value"]
+
+
+@pytest.mark.asyncio
+async def test_list_instances_async_from_dict():
+ await test_list_instances_async(request_type=dict)
+
+
+def test_list_instances_field_headers():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = cloud_memcache.ListInstancesRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ call.return_value = cloud_memcache.ListInstancesResponse()
+
+ client.list_instances(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_instances_field_headers_async():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = cloud_memcache.ListInstancesRequest()
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ cloud_memcache.ListInstancesResponse()
+ )
+
+ await client.list_instances(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_instances_flattened():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cloud_memcache.ListInstancesResponse()
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_instances(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].parent == "parent_value"
+
+
+def test_list_instances_flattened_error():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_instances(
+ cloud_memcache.ListInstancesRequest(), parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_instances_flattened_async():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cloud_memcache.ListInstancesResponse()
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ cloud_memcache.ListInstancesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_instances(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_instances_flattened_error_async():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_instances(
+ cloud_memcache.ListInstancesRequest(), parent="parent_value",
+ )
+
+
+def test_list_instances_pager():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials,)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ cloud_memcache.ListInstancesResponse(
+ resources=[
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ ],
+ next_page_token="abc",
+ ),
+ cloud_memcache.ListInstancesResponse(resources=[], next_page_token="def",),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(),], next_page_token="ghi",
+ ),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(), cloud_memcache.Instance(),],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+ )
+ pager = client.list_instances(request={})
+
+ assert pager._metadata == metadata
+
+ results = [i for i in pager]
+ assert len(results) == 6
+ assert all(isinstance(i, cloud_memcache.Instance) for i in results)
+
+
+def test_list_instances_pages():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials,)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_instances), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ cloud_memcache.ListInstancesResponse(
+ resources=[
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ ],
+ next_page_token="abc",
+ ),
+ cloud_memcache.ListInstancesResponse(resources=[], next_page_token="def",),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(),], next_page_token="ghi",
+ ),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(), cloud_memcache.Instance(),],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_instances(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_instances_async_pager():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials,)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ cloud_memcache.ListInstancesResponse(
+ resources=[
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ ],
+ next_page_token="abc",
+ ),
+ cloud_memcache.ListInstancesResponse(resources=[], next_page_token="def",),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(),], next_page_token="ghi",
+ ),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(), cloud_memcache.Instance(),],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_instances(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, cloud_memcache.Instance) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_instances_async_pages():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials,)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_instances), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ cloud_memcache.ListInstancesResponse(
+ resources=[
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ cloud_memcache.Instance(),
+ ],
+ next_page_token="abc",
+ ),
+ cloud_memcache.ListInstancesResponse(resources=[], next_page_token="def",),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(),], next_page_token="ghi",
+ ),
+ cloud_memcache.ListInstancesResponse(
+ resources=[cloud_memcache.Instance(), cloud_memcache.Instance(),],
+ ),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_instances(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_get_instance(
+ transport: str = "grpc", request_type=cloud_memcache.GetInstanceRequest
+):
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cloud_memcache.Instance(
+ name="name_value",
+ display_name="display_name_value",
+ authorized_network="authorized_network_value",
+ zones=["zones_value"],
+ node_count=1070,
+ memcache_version=cloud_memcache.MemcacheVersion.MEMCACHE_1_5,
+ state=cloud_memcache.Instance.State.CREATING,
+ memcache_full_version="memcache_full_version_value",
+ discovery_endpoint="discovery_endpoint_value",
+ )
+
+ response = client.get_instance(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cloud_memcache.GetInstanceRequest()
+
+ # Establish that the response is the type that we expect.
+
+ assert isinstance(response, cloud_memcache.Instance)
+
+ assert response.name == "name_value"
+
+ assert response.display_name == "display_name_value"
+
+ assert response.authorized_network == "authorized_network_value"
+
+ assert response.zones == ["zones_value"]
+
+ assert response.node_count == 1070
+
+ assert response.memcache_version == cloud_memcache.MemcacheVersion.MEMCACHE_1_5
+
+ assert response.state == cloud_memcache.Instance.State.CREATING
+
+ assert response.memcache_full_version == "memcache_full_version_value"
+
+ assert response.discovery_endpoint == "discovery_endpoint_value"
+
+
+def test_get_instance_from_dict():
+ test_get_instance(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_get_instance_async(
+ transport: str = "grpc_asyncio", request_type=cloud_memcache.GetInstanceRequest
+):
+ client = CloudMemcacheAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ cloud_memcache.Instance(
+ name="name_value",
+ display_name="display_name_value",
+ authorized_network="authorized_network_value",
+ zones=["zones_value"],
+ node_count=1070,
+ memcache_version=cloud_memcache.MemcacheVersion.MEMCACHE_1_5,
+ state=cloud_memcache.Instance.State.CREATING,
+ memcache_full_version="memcache_full_version_value",
+ discovery_endpoint="discovery_endpoint_value",
+ )
+ )
+
+ response = await client.get_instance(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cloud_memcache.GetInstanceRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cloud_memcache.Instance)
+
+ assert response.name == "name_value"
+
+ assert response.display_name == "display_name_value"
+
+ assert response.authorized_network == "authorized_network_value"
+
+ assert response.zones == ["zones_value"]
+
+ assert response.node_count == 1070
+
+ assert response.memcache_version == cloud_memcache.MemcacheVersion.MEMCACHE_1_5
+
+ assert response.state == cloud_memcache.Instance.State.CREATING
+
+ assert response.memcache_full_version == "memcache_full_version_value"
+
+ assert response.discovery_endpoint == "discovery_endpoint_value"
+
+
@pytest.mark.asyncio
async def test_get_instance_async_from_dict():
    """get_instance (async) also accepts a plain dict as the request."""
    await test_get_instance_async(request_type=dict)


def test_get_instance_field_headers():
    """get_instance sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.GetInstanceRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = cloud_memcache.Instance()

        client.get_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_get_instance_field_headers_async():
    """get_instance (async) sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.GetInstanceRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_memcache.Instance()
        )

        await client.get_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_get_instance_flattened():
    """get_instance accepts the flattened ``name`` keyword argument."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = cloud_memcache.Instance()

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0].name == "name_value"


def test_get_instance_flattened_error():
    """get_instance rejects mixing a request object with flattened fields."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_instance(
            cloud_memcache.GetInstanceRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_get_instance_flattened_async():
    """get_instance (async) accepts the flattened ``name`` keyword argument."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  A single
        # awaitable assignment suffices; assigning a plain message first
        # would be dead code, immediately overwritten.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            cloud_memcache.Instance()
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.get_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_get_instance_flattened_error_async():
    """get_instance (async) rejects mixing a request object with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_instance(
            cloud_memcache.GetInstanceRequest(), name="name_value",
        )
+
+
def test_create_instance(
    transport: str = "grpc", request_type=cloud_memcache.CreateInstanceRequest
):
    """create_instance forwards the request and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")

        response = client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.CreateInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_create_instance_from_dict():
    """create_instance also accepts a plain dict as the request."""
    test_create_instance(request_type=dict)


@pytest.mark.asyncio
async def test_create_instance_async(
    transport: str = "grpc_asyncio", request_type=cloud_memcache.CreateInstanceRequest
):
    """create_instance (async) forwards the request and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        response = await client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.CreateInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_create_instance_async_from_dict():
    """create_instance (async) also accepts a plain dict as the request."""
    await test_create_instance_async(request_type=dict)


def test_create_instance_field_headers():
    """create_instance sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.CreateInstanceRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")

        client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_create_instance_field_headers_async():
    """create_instance (async) sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.CreateInstanceRequest()
    request.parent = "parent/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )

        await client.create_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]


def test_create_instance_flattened():
    """create_instance accepts the flattened parent/instance_id/resource args."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_instance(
            parent="parent_value",
            instance_id="instance_id_value",
            resource=cloud_memcache.Instance(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0].parent == "parent_value"

        assert args[0].instance_id == "instance_id_value"

        assert args[0].resource == cloud_memcache.Instance(name="name_value")


def test_create_instance_flattened_error():
    """create_instance rejects mixing a request object with flattened fields."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_instance(
            cloud_memcache.CreateInstanceRequest(),
            parent="parent_value",
            instance_id="instance_id_value",
            resource=cloud_memcache.Instance(name="name_value"),
        )


@pytest.mark.asyncio
async def test_create_instance_flattened_async():
    """create_instance (async) accepts the flattened parent/instance_id/resource args."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  A single
        # awaitable assignment suffices; assigning a plain Operation first
        # would be dead code, immediately overwritten.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.create_instance(
            parent="parent_value",
            instance_id="instance_id_value",
            resource=cloud_memcache.Instance(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0].parent == "parent_value"

        assert args[0].instance_id == "instance_id_value"

        assert args[0].resource == cloud_memcache.Instance(name="name_value")


@pytest.mark.asyncio
async def test_create_instance_flattened_error_async():
    """create_instance (async) rejects mixing a request object with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_instance(
            cloud_memcache.CreateInstanceRequest(),
            parent="parent_value",
            instance_id="instance_id_value",
            resource=cloud_memcache.Instance(name="name_value"),
        )
+
+
def test_update_instance(
    transport: str = "grpc", request_type=cloud_memcache.UpdateInstanceRequest
):
    """update_instance forwards the request and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")

        response = client.update_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.UpdateInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_update_instance_from_dict():
    """update_instance also accepts a plain dict as the request."""
    test_update_instance(request_type=dict)


@pytest.mark.asyncio
async def test_update_instance_async(
    transport: str = "grpc_asyncio", request_type=cloud_memcache.UpdateInstanceRequest
):
    """update_instance (async) forwards the request and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        response = await client.update_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.UpdateInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_update_instance_async_from_dict():
    """update_instance (async) also accepts a plain dict as the request."""
    await test_update_instance_async(request_type=dict)


def test_update_instance_field_headers():
    """update_instance sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateInstanceRequest()
    request.resource.name = "resource.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")

        client.update_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource.name=resource.name/value",
    ) in kw["metadata"]


@pytest.mark.asyncio
async def test_update_instance_field_headers_async():
    """update_instance (async) sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateInstanceRequest()
    request.resource.name = "resource.name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )

        await client.update_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource.name=resource.name/value",
    ) in kw["metadata"]


def test_update_instance_flattened():
    """update_instance accepts the flattened update_mask/resource args."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_instance(
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            resource=cloud_memcache.Instance(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])

        assert args[0].resource == cloud_memcache.Instance(name="name_value")


def test_update_instance_flattened_error():
    """update_instance rejects mixing a request object with flattened fields."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_instance(
            cloud_memcache.UpdateInstanceRequest(),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            resource=cloud_memcache.Instance(name="name_value"),
        )


@pytest.mark.asyncio
async def test_update_instance_flattened_async():
    """update_instance (async) accepts the flattened update_mask/resource args."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  A single
        # awaitable assignment suffices; assigning a plain Operation first
        # would be dead code, immediately overwritten.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.update_instance(
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            resource=cloud_memcache.Instance(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])

        assert args[0].resource == cloud_memcache.Instance(name="name_value")


@pytest.mark.asyncio
async def test_update_instance_flattened_error_async():
    """update_instance (async) rejects mixing a request object with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_instance(
            cloud_memcache.UpdateInstanceRequest(),
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            resource=cloud_memcache.Instance(name="name_value"),
        )
+
+
def test_update_parameters(
    transport: str = "grpc", request_type=cloud_memcache.UpdateParametersRequest
):
    """update_parameters forwards the request and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_parameters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")

        response = client.update_parameters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.UpdateParametersRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_update_parameters_from_dict():
    """update_parameters also accepts a plain dict as the request."""
    test_update_parameters(request_type=dict)


@pytest.mark.asyncio
async def test_update_parameters_async(
    transport: str = "grpc_asyncio", request_type=cloud_memcache.UpdateParametersRequest
):
    """update_parameters (async) forwards the request and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_parameters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        response = await client.update_parameters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.UpdateParametersRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_update_parameters_async_from_dict():
    """update_parameters (async) also accepts a plain dict as the request."""
    await test_update_parameters_async(request_type=dict)


def test_update_parameters_field_headers():
    """update_parameters sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateParametersRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_parameters), "__call__"
    ) as call:
        call.return_value = operations_pb2.Operation(name="operations/op")

        client.update_parameters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_update_parameters_field_headers_async():
    """update_parameters (async) sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.UpdateParametersRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_parameters), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )

        await client.update_parameters(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_update_parameters_flattened():
    """update_parameters accepts the flattened name/update_mask/parameters args."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_parameters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_parameters(
            name="name_value",
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            parameters=cloud_memcache.MemcacheParameters(id="id_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0].name == "name_value"

        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])

        assert args[0].parameters == cloud_memcache.MemcacheParameters(id="id_value")


def test_update_parameters_flattened_error():
    """update_parameters rejects mixing a request object with flattened fields."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_parameters(
            cloud_memcache.UpdateParametersRequest(),
            name="name_value",
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            parameters=cloud_memcache.MemcacheParameters(id="id_value"),
        )


@pytest.mark.asyncio
async def test_update_parameters_flattened_async():
    """update_parameters (async) accepts the flattened name/update_mask/parameters args."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_parameters), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  A single
        # awaitable assignment suffices; assigning a plain Operation first
        # would be dead code, immediately overwritten.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.update_parameters(
            name="name_value",
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            parameters=cloud_memcache.MemcacheParameters(id="id_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0].name == "name_value"

        assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])

        assert args[0].parameters == cloud_memcache.MemcacheParameters(id="id_value")


@pytest.mark.asyncio
async def test_update_parameters_flattened_error_async():
    """update_parameters (async) rejects mixing a request object with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_parameters(
            cloud_memcache.UpdateParametersRequest(),
            name="name_value",
            update_mask=field_mask.FieldMask(paths=["paths_value"]),
            parameters=cloud_memcache.MemcacheParameters(id="id_value"),
        )
+
+
def test_delete_instance(
    transport: str = "grpc", request_type=cloud_memcache.DeleteInstanceRequest
):
    """delete_instance forwards the request and returns an LRO future."""
    client = CloudMemcacheClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/spam")

        response = client.delete_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.DeleteInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


def test_delete_instance_from_dict():
    """delete_instance also accepts a plain dict as the request."""
    test_delete_instance(request_type=dict)


@pytest.mark.asyncio
async def test_delete_instance_async(
    transport: str = "grpc_asyncio", request_type=cloud_memcache.DeleteInstanceRequest
):
    """delete_instance (async) forwards the request and returns an LRO future."""
    client = CloudMemcacheAsyncClient(
        credentials=credentials.AnonymousCredentials(), transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        response = await client.delete_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0] == cloud_memcache.DeleteInstanceRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, future.Future)


@pytest.mark.asyncio
async def test_delete_instance_async_from_dict():
    """delete_instance (async) also accepts a plain dict as the request."""
    await test_delete_instance_async(request_type=dict)


def test_delete_instance_field_headers():
    """delete_instance sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.DeleteInstanceRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = operations_pb2.Operation(name="operations/op")

        client.delete_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


@pytest.mark.asyncio
async def test_delete_instance_field_headers_async():
    """delete_instance (async) sends URI-derived routing info as a request-params header."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = cloud_memcache.DeleteInstanceRequest()
    request.name = "name/value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/op")
        )

        await client.delete_instance(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]


def test_delete_instance_flattened():
    """delete_instance accepts the flattened ``name`` keyword argument."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = operations_pb2.Operation(name="operations/op")

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]

        assert args[0].name == "name_value"


def test_delete_instance_flattened_error():
    """delete_instance rejects mixing a request object with flattened fields."""
    client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_instance(
            cloud_memcache.DeleteInstanceRequest(), name="name_value",
        )


@pytest.mark.asyncio
async def test_delete_instance_flattened_async():
    """delete_instance (async) accepts the flattened ``name`` keyword argument."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_instance), "__call__") as call:
        # Designate an appropriate return value for the call.  A single
        # awaitable assignment suffices; assigning a plain Operation first
        # would be dead code, immediately overwritten.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            operations_pb2.Operation(name="operations/spam")
        )

        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        await client.delete_instance(name="name_value",)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]

        assert args[0].name == "name_value"


@pytest.mark.asyncio
async def test_delete_instance_flattened_error_async():
    """delete_instance (async) rejects mixing a request object with flattened fields."""
    client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_instance(
            cloud_memcache.DeleteInstanceRequest(), name="name_value",
        )
+
+
+def test_apply_parameters(
+ transport: str = "grpc", request_type=cloud_memcache.ApplyParametersRequest
+):
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.apply_parameters), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+
+ response = client.apply_parameters(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cloud_memcache.ApplyParametersRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+def test_apply_parameters_from_dict():
+ test_apply_parameters(request_type=dict)
+
+
+@pytest.mark.asyncio
+async def test_apply_parameters_async(
+ transport: str = "grpc_asyncio", request_type=cloud_memcache.ApplyParametersRequest
+):
+ client = CloudMemcacheAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.apply_parameters), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+
+ response = await client.apply_parameters(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0] == cloud_memcache.ApplyParametersRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_apply_parameters_async_from_dict():
+ await test_apply_parameters_async(request_type=dict)
+
+
+def test_apply_parameters_field_headers():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = cloud_memcache.ApplyParametersRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.apply_parameters), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ client.apply_parameters(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_apply_parameters_field_headers_async():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = cloud_memcache.ApplyParametersRequest()
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.apply_parameters), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+
+ await client.apply_parameters(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_apply_parameters_flattened():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.apply_parameters), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.apply_parameters(
+ name="name_value", node_ids=["node_ids_value"], apply_all=True,
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+ assert args[0].node_ids == ["node_ids_value"]
+
+    assert args[0].apply_all is True
+
+
+def test_apply_parameters_flattened_error():
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.apply_parameters(
+ cloud_memcache.ApplyParametersRequest(),
+ name="name_value",
+ node_ids=["node_ids_value"],
+ apply_all=True,
+ )
+
+
+@pytest.mark.asyncio
+async def test_apply_parameters_flattened_async():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.apply_parameters), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.apply_parameters(
+ name="name_value", node_ids=["node_ids_value"], apply_all=True,
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+
+ assert args[0].name == "name_value"
+
+ assert args[0].node_ids == ["node_ids_value"]
+
+    assert args[0].apply_all is True
+
+
+@pytest.mark.asyncio
+async def test_apply_parameters_flattened_error_async():
+ client = CloudMemcacheAsyncClient(credentials=credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.apply_parameters(
+ cloud_memcache.ApplyParametersRequest(),
+ name="name_value",
+ node_ids=["node_ids_value"],
+ apply_all=True,
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.CloudMemcacheGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.CloudMemcacheGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = CloudMemcacheClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.CloudMemcacheGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = CloudMemcacheClient(
+ client_options={"scopes": ["1", "2"]}, transport=transport,
+ )
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.CloudMemcacheGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ client = CloudMemcacheClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.CloudMemcacheGrpcTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.CloudMemcacheGrpcAsyncIOTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.CloudMemcacheGrpcTransport,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(auth, "default") as adc:
+ adc.return_value = (credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials(),)
+ assert isinstance(client.transport, transports.CloudMemcacheGrpcTransport,)
+
+
+def test_cloud_memcache_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(exceptions.DuplicateCredentialArgs):
+ transport = transports.CloudMemcacheTransport(
+ credentials=credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_cloud_memcache_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.CloudMemcacheTransport(
+ credentials=credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "list_instances",
+ "get_instance",
+ "create_instance",
+ "update_instance",
+ "update_parameters",
+ "delete_instance",
+ "apply_parameters",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
+
+
+def test_cloud_memcache_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ auth, "load_credentials_from_file"
+ ) as load_creds, mock.patch(
+ "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (credentials.AnonymousCredentials(), None)
+ transport = transports.CloudMemcacheTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_cloud_memcache_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(auth, "default") as adc, mock.patch(
+ "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (credentials.AnonymousCredentials(), None)
+ transport = transports.CloudMemcacheTransport()
+ adc.assert_called_once()
+
+
+def test_cloud_memcache_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(auth, "default") as adc:
+ adc.return_value = (credentials.AnonymousCredentials(), None)
+ CloudMemcacheClient()
+ adc.assert_called_once_with(
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+def test_cloud_memcache_transport_auth_adc():
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(auth, "default") as adc:
+ adc.return_value = (credentials.AnonymousCredentials(), None)
+ transports.CloudMemcacheGrpcTransport(
+ host="squid.clam.whelk", quota_project_id="octopus"
+ )
+ adc.assert_called_once_with(
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_cloud_memcache_host_no_port():
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="memcache.googleapis.com"
+ ),
+ )
+ assert client.transport._host == "memcache.googleapis.com:443"
+
+
+def test_cloud_memcache_host_with_port():
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="memcache.googleapis.com:8000"
+ ),
+ )
+ assert client.transport._host == "memcache.googleapis.com:8000"
+
+
+def test_cloud_memcache_grpc_transport_channel():
+ channel = grpc.insecure_channel("http://localhost/")
+
+ # Check that channel is used if provided.
+ transport = transports.CloudMemcacheGrpcTransport(
+ host="squid.clam.whelk", channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_cloud_memcache_grpc_asyncio_transport_channel():
+ channel = aio.insecure_channel("http://localhost/")
+
+ # Check that channel is used if provided.
+ transport = transports.CloudMemcacheGrpcAsyncIOTransport(
+ host="squid.clam.whelk", channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.CloudMemcacheGrpcTransport,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ ],
+)
+def test_cloud_memcache_transport_channel_mtls_with_client_cert_source(transport_class):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel", autospec=True
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.CloudMemcacheGrpcTransport,
+ transports.CloudMemcacheGrpcAsyncIOTransport,
+ ],
+)
+def test_cloud_memcache_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel", autospec=True
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_cloud_memcache_grpc_lro_client():
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc",
+ )
+ transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+ assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_cloud_memcache_grpc_lro_async_client():
+ client = CloudMemcacheAsyncClient(
+ credentials=credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+ assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_instance_path():
+ project = "squid"
+ location = "clam"
+ instance = "whelk"
+
+ expected = "projects/{project}/locations/{location}/instances/{instance}".format(
+ project=project, location=location, instance=instance,
+ )
+ actual = CloudMemcacheClient.instance_path(project, location, instance)
+ assert expected == actual
+
+
+def test_parse_instance_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "instance": "nudibranch",
+ }
+ path = CloudMemcacheClient.instance_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = CloudMemcacheClient.parse_instance_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = CloudMemcacheClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = CloudMemcacheClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = CloudMemcacheClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = CloudMemcacheClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = CloudMemcacheClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = CloudMemcacheClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = CloudMemcacheClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = CloudMemcacheClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = CloudMemcacheClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+
+ expected = "projects/{project}".format(project=project,)
+ actual = CloudMemcacheClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = CloudMemcacheClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = CloudMemcacheClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = CloudMemcacheClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = CloudMemcacheClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = CloudMemcacheClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.CloudMemcacheTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = CloudMemcacheClient(
+ credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.CloudMemcacheTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = CloudMemcacheClient.get_transport_class()
+ transport = transport_class(
+ credentials=credentials.AnonymousCredentials(), client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
diff --git a/tests/unit/memcache_v1beta2/test_cloud_memcache.py b/tests/unit/memcache_v1beta2/test_cloud_memcache.py
deleted file mode 100644
index b7e87f4..0000000
--- a/tests/unit/memcache_v1beta2/test_cloud_memcache.py
+++ /dev/null
@@ -1,929 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-from unittest import mock
-
-import grpc
-import math
-import pytest
-
-from google import auth
-from google.api_core import client_options
-from google.api_core import future
-from google.api_core import grpc_helpers
-from google.api_core import operations_v1
-from google.auth import credentials
-from google.cloud.memcache_v1beta2.services.cloud_memcache import CloudMemcacheClient
-from google.cloud.memcache_v1beta2.services.cloud_memcache import pagers
-from google.cloud.memcache_v1beta2.services.cloud_memcache import transports
-from google.cloud.memcache_v1beta2.types import cloud_memcache
-from google.longrunning import operations_pb2
-from google.oauth2 import service_account
-from google.protobuf import field_mask_pb2 as field_mask # type: ignore
-from google.protobuf import timestamp_pb2 as timestamp # type: ignore
-
-
-def client_cert_source_callback():
- return b"cert bytes", b"key bytes"
-
-
-def test__get_default_mtls_endpoint():
- api_endpoint = "example.googleapis.com"
- api_mtls_endpoint = "example.mtls.googleapis.com"
- sandbox_endpoint = "example.sandbox.googleapis.com"
- sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
- non_googleapi = "api.example.com"
-
- assert CloudMemcacheClient._get_default_mtls_endpoint(None) is None
- assert (
- CloudMemcacheClient._get_default_mtls_endpoint(api_endpoint)
- == api_mtls_endpoint
- )
- assert (
- CloudMemcacheClient._get_default_mtls_endpoint(api_mtls_endpoint)
- == api_mtls_endpoint
- )
- assert (
- CloudMemcacheClient._get_default_mtls_endpoint(sandbox_endpoint)
- == sandbox_mtls_endpoint
- )
- assert (
- CloudMemcacheClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
- == sandbox_mtls_endpoint
- )
- assert (
- CloudMemcacheClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
- )
-
-
-def test_cloud_memcache_client_from_service_account_file():
- creds = credentials.AnonymousCredentials()
- with mock.patch.object(
- service_account.Credentials, "from_service_account_file"
- ) as factory:
- factory.return_value = creds
- client = CloudMemcacheClient.from_service_account_file("dummy/file/path.json")
- assert client._transport._credentials == creds
-
- client = CloudMemcacheClient.from_service_account_json("dummy/file/path.json")
- assert client._transport._credentials == creds
-
- assert client._transport._host == "memcache.googleapis.com:443"
-
-
-def test_cloud_memcache_client_client_options():
- # Check that if channel is provided we won't create a new one.
- with mock.patch(
- "google.cloud.memcache_v1beta2.services.cloud_memcache.CloudMemcacheClient.get_transport_class"
- ) as gtc:
- transport = transports.CloudMemcacheGrpcTransport(
- credentials=credentials.AnonymousCredentials()
- )
- client = CloudMemcacheClient(transport=transport)
- gtc.assert_not_called()
-
- # Check mTLS is not triggered with empty client options.
- options = client_options.ClientOptions()
- with mock.patch(
- "google.cloud.memcache_v1beta2.services.cloud_memcache.CloudMemcacheClient.get_transport_class"
- ) as gtc:
- transport = gtc.return_value = mock.MagicMock()
- client = CloudMemcacheClient(client_options=options)
- transport.assert_called_once_with(
- credentials=None, host=client.DEFAULT_ENDPOINT
- )
-
- # Check mTLS is not triggered if api_endpoint is provided but
- # client_cert_source is None.
- options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
- with mock.patch(
- "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheGrpcTransport.__init__"
- ) as grpc_transport:
- grpc_transport.return_value = None
- client = CloudMemcacheClient(client_options=options)
- grpc_transport.assert_called_once_with(
- api_mtls_endpoint=None,
- client_cert_source=None,
- credentials=None,
- host="squid.clam.whelk",
- )
-
- # Check mTLS is triggered if client_cert_source is provided.
- options = client_options.ClientOptions(
- client_cert_source=client_cert_source_callback
- )
- with mock.patch(
- "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheGrpcTransport.__init__"
- ) as grpc_transport:
- grpc_transport.return_value = None
- client = CloudMemcacheClient(client_options=options)
- grpc_transport.assert_called_once_with(
- api_mtls_endpoint=client.DEFAULT_MTLS_ENDPOINT,
- client_cert_source=client_cert_source_callback,
- credentials=None,
- host=client.DEFAULT_ENDPOINT,
- )
-
- # Check mTLS is triggered if api_endpoint and client_cert_source are provided.
- options = client_options.ClientOptions(
- api_endpoint="squid.clam.whelk", client_cert_source=client_cert_source_callback
- )
- with mock.patch(
- "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheGrpcTransport.__init__"
- ) as grpc_transport:
- grpc_transport.return_value = None
- client = CloudMemcacheClient(client_options=options)
- grpc_transport.assert_called_once_with(
- api_mtls_endpoint="squid.clam.whelk",
- client_cert_source=client_cert_source_callback,
- credentials=None,
- host="squid.clam.whelk",
- )
-
-
-def test_cloud_memcache_client_client_options_from_dict():
- with mock.patch(
- "google.cloud.memcache_v1beta2.services.cloud_memcache.transports.CloudMemcacheGrpcTransport.__init__"
- ) as grpc_transport:
- grpc_transport.return_value = None
- client = CloudMemcacheClient(
- client_options={"api_endpoint": "squid.clam.whelk"}
- )
- grpc_transport.assert_called_once_with(
- api_mtls_endpoint=None,
- client_cert_source=None,
- credentials=None,
- host="squid.clam.whelk",
- )
-
-
-def test_list_instances(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.ListInstancesRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_instances), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = cloud_memcache.ListInstancesResponse(
- next_page_token="next_page_token_value", unreachable=["unreachable_value"]
- )
-
- response = client.list_instances(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, pagers.ListInstancesPager)
- assert response.next_page_token == "next_page_token_value"
- assert response.unreachable == ["unreachable_value"]
-
-
-def test_list_instances_field_headers():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cloud_memcache.ListInstancesRequest(parent="parent/value")
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_instances), "__call__") as call:
- call.return_value = cloud_memcache.ListInstancesResponse()
- client.list_instances(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "parent=parent/value") in kw["metadata"]
-
-
-def test_list_instances_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_instances), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = cloud_memcache.ListInstancesResponse()
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.list_instances(parent="parent_value")
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
-
-
-def test_list_instances_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_instances(
- cloud_memcache.ListInstancesRequest(), parent="parent_value"
- )
-
-
-def test_list_instances_pager():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials)
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_instances), "__call__") as call:
- # Set the response to a series of pages.
- call.side_effect = (
- cloud_memcache.ListInstancesResponse(
- resources=[
- cloud_memcache.Instance(),
- cloud_memcache.Instance(),
- cloud_memcache.Instance(),
- ],
- next_page_token="abc",
- ),
- cloud_memcache.ListInstancesResponse(resources=[], next_page_token="def"),
- cloud_memcache.ListInstancesResponse(
- resources=[cloud_memcache.Instance()], next_page_token="ghi"
- ),
- cloud_memcache.ListInstancesResponse(
- resources=[cloud_memcache.Instance(), cloud_memcache.Instance()]
- ),
- RuntimeError,
- )
- results = [i for i in client.list_instances(request={})]
- assert len(results) == 6
- assert all(isinstance(i, cloud_memcache.Instance) for i in results)
-
-
-def test_list_instances_pages():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials)
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.list_instances), "__call__") as call:
- # Set the response to a series of pages.
- call.side_effect = (
- cloud_memcache.ListInstancesResponse(
- resources=[
- cloud_memcache.Instance(),
- cloud_memcache.Instance(),
- cloud_memcache.Instance(),
- ],
- next_page_token="abc",
- ),
- cloud_memcache.ListInstancesResponse(resources=[], next_page_token="def"),
- cloud_memcache.ListInstancesResponse(
- resources=[cloud_memcache.Instance()], next_page_token="ghi"
- ),
- cloud_memcache.ListInstancesResponse(
- resources=[cloud_memcache.Instance(), cloud_memcache.Instance()]
- ),
- RuntimeError,
- )
- pages = list(client.list_instances(request={}).pages)
- for page, token in zip(pages, ["abc", "def", "ghi", ""]):
- assert page.raw_page.next_page_token == token
-
-
-def test_get_instance(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.GetInstanceRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = cloud_memcache.Instance(
- name="name_value",
- display_name="display_name_value",
- authorized_network="authorized_network_value",
- zones=["zones_value"],
- node_count=1070,
- memcache_version=cloud_memcache.MemcacheVersion.MEMCACHE_1_5,
- state=cloud_memcache.Instance.State.CREATING,
- memcache_full_version="memcache_full_version_value",
- discovery_endpoint="discovery_endpoint_value",
- )
-
- response = client.get_instance(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cloud_memcache.Instance)
- assert response.name == "name_value"
- assert response.display_name == "display_name_value"
- assert response.authorized_network == "authorized_network_value"
- assert response.zones == ["zones_value"]
- assert response.node_count == 1070
- assert response.memcache_version == cloud_memcache.MemcacheVersion.MEMCACHE_1_5
- assert response.state == cloud_memcache.Instance.State.CREATING
- assert response.memcache_full_version == "memcache_full_version_value"
- assert response.discovery_endpoint == "discovery_endpoint_value"
-
-
-def test_get_instance_field_headers():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cloud_memcache.GetInstanceRequest(name="name/value")
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_instance), "__call__") as call:
- call.return_value = cloud_memcache.Instance()
- client.get_instance(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert ("x-goog-request-params", "name=name/value") in kw["metadata"]
-
-
-def test_get_instance_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.get_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = cloud_memcache.Instance()
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.get_instance(name="name_value")
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
-
-
-def test_get_instance_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_instance(cloud_memcache.GetInstanceRequest(), name="name_value")
-
-
-def test_create_instance(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.CreateInstanceRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/spam")
-
- response = client.create_instance(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_create_instance_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.create_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/op")
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.create_instance(
- parent="parent_value",
- instance_id="instance_id_value",
- resource=cloud_memcache.Instance(name="name_value"),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].parent == "parent_value"
- assert args[0].instance_id == "instance_id_value"
- assert args[0].resource == cloud_memcache.Instance(name="name_value")
-
-
-def test_create_instance_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_instance(
- cloud_memcache.CreateInstanceRequest(),
- parent="parent_value",
- instance_id="instance_id_value",
- resource=cloud_memcache.Instance(name="name_value"),
- )
-
-
-def test_update_instance(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.UpdateInstanceRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/spam")
-
- response = client.update_instance(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_update_instance_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.update_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/op")
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.update_instance(
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
- resource=cloud_memcache.Instance(name="name_value"),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
- assert args[0].resource == cloud_memcache.Instance(name="name_value")
-
-
-def test_update_instance_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_instance(
- cloud_memcache.UpdateInstanceRequest(),
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
- resource=cloud_memcache.Instance(name="name_value"),
- )
-
-
-def test_update_parameters(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.UpdateParametersRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.update_parameters), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/spam")
-
- response = client.update_parameters(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_update_parameters_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.update_parameters), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/op")
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.update_parameters(
- name="name_value",
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
- parameters=cloud_memcache.MemcacheParameters(id="id_value"),
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].update_mask == field_mask.FieldMask(paths=["paths_value"])
- assert args[0].parameters == cloud_memcache.MemcacheParameters(id="id_value")
-
-
-def test_update_parameters_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.update_parameters(
- cloud_memcache.UpdateParametersRequest(),
- name="name_value",
- update_mask=field_mask.FieldMask(paths=["paths_value"]),
- parameters=cloud_memcache.MemcacheParameters(id="id_value"),
- )
-
-
-def test_delete_instance(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.DeleteInstanceRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/spam")
-
- response = client.delete_instance(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_delete_instance_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(type(client._transport.delete_instance), "__call__") as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/op")
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.delete_instance(name="name_value")
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
-
-
-def test_delete_instance_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_instance(
- cloud_memcache.DeleteInstanceRequest(), name="name_value"
- )
-
-
-def test_apply_parameters(transport: str = "grpc"):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = cloud_memcache.ApplyParametersRequest()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.apply_parameters), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/spam")
-
- response = client.apply_parameters(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
-
- assert args[0] == request
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, future.Future)
-
-
-def test_apply_parameters_flattened():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client._transport.apply_parameters), "__call__"
- ) as call:
- # Designate an appropriate return value for the call.
- call.return_value = operations_pb2.Operation(name="operations/op")
-
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = client.apply_parameters(
- name="name_value", node_ids=["node_ids_value"], apply_all=True
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].name == "name_value"
- assert args[0].node_ids == ["node_ids_value"]
- assert args[0].apply_all == True
-
-
-def test_apply_parameters_flattened_error():
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.apply_parameters(
- cloud_memcache.ApplyParametersRequest(),
- name="name_value",
- node_ids=["node_ids_value"],
- apply_all=True,
- )
-
-
-def test_credentials_transport_error():
- # It is an error to provide credentials and a transport instance.
- transport = transports.CloudMemcacheGrpcTransport(
- credentials=credentials.AnonymousCredentials()
- )
- with pytest.raises(ValueError):
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport=transport
- )
-
-
-def test_transport_instance():
- # A client may be instantiated with a custom transport instance.
- transport = transports.CloudMemcacheGrpcTransport(
- credentials=credentials.AnonymousCredentials()
- )
- client = CloudMemcacheClient(transport=transport)
- assert client._transport is transport
-
-
-def test_transport_grpc_default():
- # A client should use the gRPC transport by default.
- client = CloudMemcacheClient(credentials=credentials.AnonymousCredentials())
- assert isinstance(client._transport, transports.CloudMemcacheGrpcTransport)
-
-
-def test_cloud_memcache_base_transport():
- # Instantiate the base transport.
- transport = transports.CloudMemcacheTransport(
- credentials=credentials.AnonymousCredentials()
- )
-
- # Every method on the transport should just blindly
- # raise NotImplementedError.
- methods = (
- "list_instances",
- "get_instance",
- "create_instance",
- "update_instance",
- "update_parameters",
- "delete_instance",
- "apply_parameters",
- )
- for method in methods:
- with pytest.raises(NotImplementedError):
- getattr(transport, method)(request=object())
-
- # Additionally, the LRO client (a property) should
- # also raise NotImplementedError
- with pytest.raises(NotImplementedError):
- transport.operations_client
-
-
-def test_cloud_memcache_auth_adc():
- # If no credentials are provided, we should use ADC credentials.
- with mock.patch.object(auth, "default") as adc:
- adc.return_value = (credentials.AnonymousCredentials(), None)
- CloudMemcacheClient()
- adc.assert_called_once_with(
- scopes=("https://www.googleapis.com/auth/cloud-platform",)
- )
-
-
-def test_cloud_memcache_host_no_port():
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(),
- client_options=client_options.ClientOptions(
- api_endpoint="memcache.googleapis.com"
- ),
- transport="grpc",
- )
- assert client._transport._host == "memcache.googleapis.com:443"
-
-
-def test_cloud_memcache_host_with_port():
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(),
- client_options=client_options.ClientOptions(
- api_endpoint="memcache.googleapis.com:8000"
- ),
- transport="grpc",
- )
- assert client._transport._host == "memcache.googleapis.com:8000"
-
-
-def test_cloud_memcache_grpc_transport_channel():
- channel = grpc.insecure_channel("http://localhost/")
-
- # Check that if channel is provided, mtls endpoint and client_cert_source
- # won't be used.
- callback = mock.MagicMock()
- transport = transports.CloudMemcacheGrpcTransport(
- host="squid.clam.whelk",
- channel=channel,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=callback,
- )
- assert transport.grpc_channel == channel
- assert transport._host == "squid.clam.whelk:443"
- assert not callback.called
-
-
-@mock.patch("grpc.ssl_channel_credentials", autospec=True)
-@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
-def test_cloud_memcache_grpc_transport_channel_mtls_with_client_cert_source(
- grpc_create_channel, grpc_ssl_channel_cred
-):
- # Check that if channel is None, but api_mtls_endpoint and client_cert_source
- # are provided, then a mTLS channel will be created.
- mock_cred = mock.Mock()
-
- mock_ssl_cred = mock.Mock()
- grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- transport = transports.CloudMemcacheGrpcTransport(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint="mtls.squid.clam.whelk",
- client_cert_source=client_cert_source_callback,
- )
- grpc_ssl_channel_cred.assert_called_once_with(
- certificate_chain=b"cert bytes", private_key=b"key bytes"
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- ssl_credentials=mock_ssl_cred,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
- )
- assert transport.grpc_channel == mock_grpc_channel
-
-
-@pytest.mark.parametrize(
- "api_mtls_endpoint", ["mtls.squid.clam.whelk", "mtls.squid.clam.whelk:443"]
-)
-@mock.patch("google.api_core.grpc_helpers.create_channel", autospec=True)
-def test_cloud_memcache_grpc_transport_channel_mtls_with_adc(
- grpc_create_channel, api_mtls_endpoint
-):
- # Check that if channel and client_cert_source are None, but api_mtls_endpoint
- # is provided, then a mTLS channel will be created with SSL ADC.
- mock_grpc_channel = mock.Mock()
- grpc_create_channel.return_value = mock_grpc_channel
-
- # Mock google.auth.transport.grpc.SslCredentials class.
- mock_ssl_cred = mock.Mock()
- with mock.patch.multiple(
- "google.auth.transport.grpc.SslCredentials",
- __init__=mock.Mock(return_value=None),
- ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
- ):
- mock_cred = mock.Mock()
- transport = transports.CloudMemcacheGrpcTransport(
- host="squid.clam.whelk",
- credentials=mock_cred,
- api_mtls_endpoint=api_mtls_endpoint,
- client_cert_source=None,
- )
- grpc_create_channel.assert_called_once_with(
- "mtls.squid.clam.whelk:443",
- credentials=mock_cred,
- ssl_credentials=mock_ssl_cred,
- scopes=("https://www.googleapis.com/auth/cloud-platform",),
- )
- assert transport.grpc_channel == mock_grpc_channel
-
-
-def test_cloud_memcache_grpc_lro_client():
- client = CloudMemcacheClient(
- credentials=credentials.AnonymousCredentials(), transport="grpc"
- )
- transport = client._transport
-
- # Ensure that we have a api-core operations client.
- assert isinstance(transport.operations_client, operations_v1.OperationsClient)
-
- # Ensure that subsequent calls to the property send the exact same object.
- assert transport.operations_client is transport.operations_client
-
-
-def test_instance_path():
- project = "squid"
- location = "clam"
- instance = "whelk"
-
- expected = "projects/{project}/locations/{location}/instances/{instance}".format(
- project=project, location=location, instance=instance
- )
- actual = CloudMemcacheClient.instance_path(project, location, instance)
- assert expected == actual
-
-
-def test_parse_instance_path():
- expected = {"project": "octopus", "location": "oyster", "instance": "nudibranch"}
- path = CloudMemcacheClient.instance_path(**expected)
-
- # Check that the path construction is reversible.
- actual = CloudMemcacheClient.parse_instance_path(path)
- assert expected == actual