From 8634282ea0aff66815d6fafdb8ea53810161db96 Mon Sep 17 00:00:00 2001
From: Owl Bot
Date: Fri, 23 Jul 2021 21:21:10 +0000
Subject: [PATCH 1/2] fix: enable self signed jwt for grpc

chore: use gapic-generator-python 0.50.5

PiperOrigin-RevId: 386504689

Source-Link: https://github.com/googleapis/googleapis/commit/762094a99ac6e03a17516b13dfbef37927267a70

Source-Link: https://github.com/googleapis/googleapis-gen/commit/6bfc480e1a161d5de121c2bcc3745885d33b265a
---
 owl-bot-staging/v1/.coveragerc | 17 +
 owl-bot-staging/v1/MANIFEST.in | 2 +
 owl-bot-staging/v1/README.rst | 49 +
 owl-bot-staging/v1/docs/conf.py | 376 +
 .../v1/docs/container_v1/cluster_manager.rst | 10 +
 .../v1/docs/container_v1/services.rst | 6 +
 .../v1/docs/container_v1/types.rst | 7 +
 owl-bot-staging/v1/docs/index.rst | 7 +
 .../v1/google/container/__init__.py | 213 +
 owl-bot-staging/v1/google/container/py.typed | 2 +
 .../v1/google/container_v1/__init__.py | 214 +
 .../google/container_v1/gapic_metadata.json | 343 +
 .../v1/google/container_v1/py.typed | 2 +
 .../google/container_v1/services/__init__.py | 15 +
 .../services/cluster_manager/__init__.py | 22 +
 .../services/cluster_manager/async_client.py | 3604 ++++++
 .../services/cluster_manager/client.py | 3731 +++++++
 .../services/cluster_manager/pagers.py | 140 +
 .../cluster_manager/transports/__init__.py | 33 +
 .../cluster_manager/transports/base.py | 666 ++
 .../cluster_manager/transports/grpc.py | 1097 ++
 .../transports/grpc_asyncio.py | 1101 ++
 .../v1/google/container_v1/types/__init__.py | 210 +
 .../container_v1/types/cluster_service.py | 5120 +++++++++
 owl-bot-staging/v1/mypy.ini | 3 +
 owl-bot-staging/v1/noxfile.py | 132 +
 .../v1/scripts/fixup_container_v1_keywords.py | 207 +
 owl-bot-staging/v1/setup.py | 54 +
 owl-bot-staging/v1/tests/__init__.py | 16 +
 owl-bot-staging/v1/tests/unit/__init__.py | 16 +
 .../v1/tests/unit/gapic/__init__.py | 16 +
 .../tests/unit/gapic/container_v1/__init__.py | 16 +
 .../container_v1/test_cluster_manager.py | 9434 ++++++++++++++++
 owl-bot-staging/v1beta1/.coveragerc | 17 +
 owl-bot-staging/v1beta1/MANIFEST.in | 2 +
 owl-bot-staging/v1beta1/README.rst | 49 +
 owl-bot-staging/v1beta1/docs/conf.py | 376 +
 .../container_v1beta1/cluster_manager.rst | 10 +
 .../docs/container_v1beta1/services.rst | 6 +
 .../v1beta1/docs/container_v1beta1/types.rst | 7 +
 owl-bot-staging/v1beta1/docs/index.rst | 7 +
 .../v1beta1/google/container/__init__.py | 249 +
 .../v1beta1/google/container/py.typed | 2 +
 .../google/container_v1beta1/__init__.py | 250 +
 .../container_v1beta1/gapic_metadata.json | 353 +
 .../v1beta1/google/container_v1beta1/py.typed | 2 +
 .../container_v1beta1/services/__init__.py | 15 +
 .../services/cluster_manager/__init__.py | 22 +
 .../services/cluster_manager/async_client.py | 3632 ++++++
 .../services/cluster_manager/client.py | 3750 +++++++
 .../services/cluster_manager/pagers.py | 140 +
 .../cluster_manager/transports/__init__.py | 33 +
 .../cluster_manager/transports/base.py | 694 ++
 .../cluster_manager/transports/grpc.py | 1124 ++
 .../transports/grpc_asyncio.py | 1128 ++
 .../container_v1beta1/types/__init__.py | 246 +
 .../types/cluster_service.py | 5866 ++++++++++
 owl-bot-staging/v1beta1/mypy.ini | 3 +
 owl-bot-staging/v1beta1/noxfile.py | 132 +
 .../fixup_container_v1beta1_keywords.py | 208 +
 owl-bot-staging/v1beta1/setup.py | 54 +
 owl-bot-staging/v1beta1/tests/__init__.py | 16 +
 .../v1beta1/tests/unit/__init__.py | 16 +
 .../v1beta1/tests/unit/gapic/__init__.py | 16 +
 .../unit/gapic/container_v1beta1/__init__.py | 16 +
.../container_v1beta1/test_cluster_manager.py | 9846 +++++++++++++++++ 66 files changed, 55168 insertions(+) create mode 100644 owl-bot-staging/v1/.coveragerc create mode 100644 owl-bot-staging/v1/MANIFEST.in create mode 100644 owl-bot-staging/v1/README.rst create mode 100644 owl-bot-staging/v1/docs/conf.py create mode 100644 owl-bot-staging/v1/docs/container_v1/cluster_manager.rst create mode 100644 owl-bot-staging/v1/docs/container_v1/services.rst create mode 100644 owl-bot-staging/v1/docs/container_v1/types.rst create mode 100644 owl-bot-staging/v1/docs/index.rst create mode 100644 owl-bot-staging/v1/google/container/__init__.py create mode 100644 owl-bot-staging/v1/google/container/py.typed create mode 100644 owl-bot-staging/v1/google/container_v1/__init__.py create mode 100644 owl-bot-staging/v1/google/container_v1/gapic_metadata.json create mode 100644 owl-bot-staging/v1/google/container_v1/py.typed create mode 100644 owl-bot-staging/v1/google/container_v1/services/__init__.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py create mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1/google/container_v1/types/__init__.py create mode 100644 owl-bot-staging/v1/google/container_v1/types/cluster_service.py create mode 100644 owl-bot-staging/v1/mypy.ini create mode 100644 owl-bot-staging/v1/noxfile.py create mode 100644 owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py create mode 100644 owl-bot-staging/v1/setup.py create mode 100644 owl-bot-staging/v1/tests/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py create mode 100644 owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py create mode 100644 owl-bot-staging/v1beta1/.coveragerc create mode 100644 owl-bot-staging/v1beta1/MANIFEST.in create mode 100644 owl-bot-staging/v1beta1/README.rst create mode 100644 owl-bot-staging/v1beta1/docs/conf.py create mode 100644 owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst create mode 100644 owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst create mode 100644 owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst create mode 100644 owl-bot-staging/v1beta1/docs/index.rst create mode 100644 owl-bot-staging/v1beta1/google/container/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/container/py.typed create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/py.typed create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py create mode 100644 
owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py create mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py create mode 100644 owl-bot-staging/v1beta1/mypy.ini create mode 100644 owl-bot-staging/v1beta1/noxfile.py create mode 100644 owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py create mode 100644 owl-bot-staging/v1beta1/setup.py create mode 100644 owl-bot-staging/v1beta1/tests/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py create mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc new file mode 100644 index 00000000..f0a87b59 --- /dev/null +++ b/owl-bot-staging/v1/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/container/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in new file mode 100644 index 00000000..cd146430 --- /dev/null +++ b/owl-bot-staging/v1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/container *.py +recursive-include google/container_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst new file mode 100644 index 00000000..83d9858c --- /dev/null +++ b/owl-bot-staging/v1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Container API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Container API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. 
_Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py
new file mode 100644
index 00000000..1f19408e
--- /dev/null
+++ b/owl-bot-staging/v1/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-container documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-container"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+    "description": "Google Client Libraries for Python",
+    "github_user": "googleapis",
+    "github_repo": "google-cloud-python",
+    "github_banner": True,
+    "font_family": "'Roboto', Georgia, sans",
+    "head_font_family": "'Roboto', Georgia, serif",
+    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-container-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+    # Temporarily suppress this to avoid "more than one target found for
+    # cross-reference" warnings, which are intractable for us to avoid while in
+    # a mono-repo.
+    # See https://github.com/sphinx-doc/sphinx/blob
+    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+    "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    # 'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    # 'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    # 'preamble': '',
+    # Latex figure (float) alignment
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [ + ( + master_doc, + "google-container.tex", + u"google-container Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-container", + u"Google Container Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-container", + u"google-container Documentation", + author, + "google-container", + "GAPIC library for Google Container API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/container_v1/cluster_manager.rst b/owl-bot-staging/v1/docs/container_v1/cluster_manager.rst new file mode 100644 index 00000000..016a460e --- /dev/null +++ b/owl-bot-staging/v1/docs/container_v1/cluster_manager.rst @@ -0,0 +1,10 @@ +ClusterManager +-------------------------------- + +.. automodule:: google.container_v1.services.cluster_manager + :members: + :inherited-members: + +.. 
automodule:: google.container_v1.services.cluster_manager.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/container_v1/services.rst b/owl-bot-staging/v1/docs/container_v1/services.rst new file mode 100644 index 00000000..faa067b2 --- /dev/null +++ b/owl-bot-staging/v1/docs/container_v1/services.rst @@ -0,0 +1,6 @@ +Services for Google Container v1 API +==================================== +.. toctree:: + :maxdepth: 2 + + cluster_manager diff --git a/owl-bot-staging/v1/docs/container_v1/types.rst b/owl-bot-staging/v1/docs/container_v1/types.rst new file mode 100644 index 00000000..97997d9c --- /dev/null +++ b/owl-bot-staging/v1/docs/container_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Container v1 API +================================= + +.. automodule:: google.container_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst new file mode 100644 index 00000000..661ade54 --- /dev/null +++ b/owl-bot-staging/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + container_v1/services + container_v1/types diff --git a/owl-bot-staging/v1/google/container/__init__.py b/owl-bot-staging/v1/google/container/__init__.py new file mode 100644 index 00000000..3a1cd2e9 --- /dev/null +++ b/owl-bot-staging/v1/google/container/__init__.py @@ -0,0 +1,213 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from google.container_v1.services.cluster_manager.client import ClusterManagerClient +from google.container_v1.services.cluster_manager.async_client import ClusterManagerAsyncClient + +from google.container_v1.types.cluster_service import AcceleratorConfig +from google.container_v1.types.cluster_service import AddonsConfig +from google.container_v1.types.cluster_service import AuthenticatorGroupsConfig +from google.container_v1.types.cluster_service import AutoprovisioningNodePoolDefaults +from google.container_v1.types.cluster_service import AutoUpgradeOptions +from google.container_v1.types.cluster_service import BinaryAuthorization +from google.container_v1.types.cluster_service import CancelOperationRequest +from google.container_v1.types.cluster_service import ClientCertificateConfig +from google.container_v1.types.cluster_service import CloudRunConfig +from google.container_v1.types.cluster_service import Cluster +from google.container_v1.types.cluster_service import ClusterAutoscaling +from google.container_v1.types.cluster_service import ClusterUpdate +from google.container_v1.types.cluster_service import CompleteIPRotationRequest +from google.container_v1.types.cluster_service import ConfigConnectorConfig +from google.container_v1.types.cluster_service import CreateClusterRequest +from google.container_v1.types.cluster_service import CreateNodePoolRequest +from google.container_v1.types.cluster_service import DailyMaintenanceWindow +from google.container_v1.types.cluster_service import DatabaseEncryption +from google.container_v1.types.cluster_service import DefaultSnatStatus +from google.container_v1.types.cluster_service import DeleteClusterRequest +from google.container_v1.types.cluster_service import DeleteNodePoolRequest +from google.container_v1.types.cluster_service import DnsCacheConfig +from google.container_v1.types.cluster_service import GetClusterRequest +from google.container_v1.types.cluster_service import GetJSONWebKeysRequest +from google.container_v1.types.cluster_service import GetJSONWebKeysResponse +from google.container_v1.types.cluster_service import GetNodePoolRequest +from google.container_v1.types.cluster_service import GetOpenIDConfigRequest +from google.container_v1.types.cluster_service import GetOpenIDConfigResponse +from google.container_v1.types.cluster_service import GetOperationRequest +from google.container_v1.types.cluster_service import GetServerConfigRequest +from google.container_v1.types.cluster_service import HorizontalPodAutoscaling +from google.container_v1.types.cluster_service import HttpLoadBalancing +from google.container_v1.types.cluster_service import IntraNodeVisibilityConfig +from google.container_v1.types.cluster_service import IPAllocationPolicy +from google.container_v1.types.cluster_service import Jwk +from google.container_v1.types.cluster_service import KubernetesDashboard +from google.container_v1.types.cluster_service import LegacyAbac +from google.container_v1.types.cluster_service import ListClustersRequest +from google.container_v1.types.cluster_service import ListClustersResponse +from google.container_v1.types.cluster_service import ListNodePoolsRequest +from google.container_v1.types.cluster_service import ListNodePoolsResponse +from google.container_v1.types.cluster_service import ListOperationsRequest +from google.container_v1.types.cluster_service import ListOperationsResponse +from google.container_v1.types.cluster_service import ListUsableSubnetworksRequest +from google.container_v1.types.cluster_service 
import ListUsableSubnetworksResponse +from google.container_v1.types.cluster_service import MaintenancePolicy +from google.container_v1.types.cluster_service import MaintenanceWindow +from google.container_v1.types.cluster_service import MasterAuth +from google.container_v1.types.cluster_service import MasterAuthorizedNetworksConfig +from google.container_v1.types.cluster_service import MaxPodsConstraint +from google.container_v1.types.cluster_service import NetworkConfig +from google.container_v1.types.cluster_service import NetworkPolicy +from google.container_v1.types.cluster_service import NetworkPolicyConfig +from google.container_v1.types.cluster_service import NodeConfig +from google.container_v1.types.cluster_service import NodeManagement +from google.container_v1.types.cluster_service import NodePool +from google.container_v1.types.cluster_service import NodePoolAutoscaling +from google.container_v1.types.cluster_service import NodeTaint +from google.container_v1.types.cluster_service import Operation +from google.container_v1.types.cluster_service import OperationProgress +from google.container_v1.types.cluster_service import PrivateClusterConfig +from google.container_v1.types.cluster_service import PrivateClusterMasterGlobalAccessConfig +from google.container_v1.types.cluster_service import RecurringTimeWindow +from google.container_v1.types.cluster_service import ReleaseChannel +from google.container_v1.types.cluster_service import ReservationAffinity +from google.container_v1.types.cluster_service import ResourceLimit +from google.container_v1.types.cluster_service import ResourceUsageExportConfig +from google.container_v1.types.cluster_service import RollbackNodePoolUpgradeRequest +from google.container_v1.types.cluster_service import SandboxConfig +from google.container_v1.types.cluster_service import ServerConfig +from google.container_v1.types.cluster_service import SetAddonsConfigRequest +from google.container_v1.types.cluster_service import SetLabelsRequest +from google.container_v1.types.cluster_service import SetLegacyAbacRequest +from google.container_v1.types.cluster_service import SetLocationsRequest +from google.container_v1.types.cluster_service import SetLoggingServiceRequest +from google.container_v1.types.cluster_service import SetMaintenancePolicyRequest +from google.container_v1.types.cluster_service import SetMasterAuthRequest +from google.container_v1.types.cluster_service import SetMonitoringServiceRequest +from google.container_v1.types.cluster_service import SetNetworkPolicyRequest +from google.container_v1.types.cluster_service import SetNodePoolAutoscalingRequest +from google.container_v1.types.cluster_service import SetNodePoolManagementRequest +from google.container_v1.types.cluster_service import SetNodePoolSizeRequest +from google.container_v1.types.cluster_service import ShieldedInstanceConfig +from google.container_v1.types.cluster_service import ShieldedNodes +from google.container_v1.types.cluster_service import StartIPRotationRequest +from google.container_v1.types.cluster_service import StatusCondition +from google.container_v1.types.cluster_service import TimeWindow +from google.container_v1.types.cluster_service import UpdateClusterRequest +from google.container_v1.types.cluster_service import UpdateMasterRequest +from google.container_v1.types.cluster_service import UpdateNodePoolRequest +from google.container_v1.types.cluster_service import UsableSubnetwork +from google.container_v1.types.cluster_service import 
UsableSubnetworkSecondaryRange +from google.container_v1.types.cluster_service import VerticalPodAutoscaling +from google.container_v1.types.cluster_service import WorkloadIdentityConfig +from google.container_v1.types.cluster_service import WorkloadMetadataConfig + +__all__ = ('ClusterManagerClient', + 'ClusterManagerAsyncClient', + 'AcceleratorConfig', + 'AddonsConfig', + 'AuthenticatorGroupsConfig', + 'AutoprovisioningNodePoolDefaults', + 'AutoUpgradeOptions', + 'BinaryAuthorization', + 'CancelOperationRequest', + 'ClientCertificateConfig', + 'CloudRunConfig', + 'Cluster', + 'ClusterAutoscaling', + 'ClusterUpdate', + 'CompleteIPRotationRequest', + 'ConfigConnectorConfig', + 'CreateClusterRequest', + 'CreateNodePoolRequest', + 'DailyMaintenanceWindow', + 'DatabaseEncryption', + 'DefaultSnatStatus', + 'DeleteClusterRequest', + 'DeleteNodePoolRequest', + 'DnsCacheConfig', + 'GetClusterRequest', + 'GetJSONWebKeysRequest', + 'GetJSONWebKeysResponse', + 'GetNodePoolRequest', + 'GetOpenIDConfigRequest', + 'GetOpenIDConfigResponse', + 'GetOperationRequest', + 'GetServerConfigRequest', + 'HorizontalPodAutoscaling', + 'HttpLoadBalancing', + 'IntraNodeVisibilityConfig', + 'IPAllocationPolicy', + 'Jwk', + 'KubernetesDashboard', + 'LegacyAbac', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListNodePoolsRequest', + 'ListNodePoolsResponse', + 'ListOperationsRequest', + 'ListOperationsResponse', + 'ListUsableSubnetworksRequest', + 'ListUsableSubnetworksResponse', + 'MaintenancePolicy', + 'MaintenanceWindow', + 'MasterAuth', + 'MasterAuthorizedNetworksConfig', + 'MaxPodsConstraint', + 'NetworkConfig', + 'NetworkPolicy', + 'NetworkPolicyConfig', + 'NodeConfig', + 'NodeManagement', + 'NodePool', + 'NodePoolAutoscaling', + 'NodeTaint', + 'Operation', + 'OperationProgress', + 'PrivateClusterConfig', + 'PrivateClusterMasterGlobalAccessConfig', + 'RecurringTimeWindow', + 'ReleaseChannel', + 'ReservationAffinity', + 'ResourceLimit', + 'ResourceUsageExportConfig', + 'RollbackNodePoolUpgradeRequest', + 'SandboxConfig', + 'ServerConfig', + 'SetAddonsConfigRequest', + 'SetLabelsRequest', + 'SetLegacyAbacRequest', + 'SetLocationsRequest', + 'SetLoggingServiceRequest', + 'SetMaintenancePolicyRequest', + 'SetMasterAuthRequest', + 'SetMonitoringServiceRequest', + 'SetNetworkPolicyRequest', + 'SetNodePoolAutoscalingRequest', + 'SetNodePoolManagementRequest', + 'SetNodePoolSizeRequest', + 'ShieldedInstanceConfig', + 'ShieldedNodes', + 'StartIPRotationRequest', + 'StatusCondition', + 'TimeWindow', + 'UpdateClusterRequest', + 'UpdateMasterRequest', + 'UpdateNodePoolRequest', + 'UsableSubnetwork', + 'UsableSubnetworkSecondaryRange', + 'VerticalPodAutoscaling', + 'WorkloadIdentityConfig', + 'WorkloadMetadataConfig', +) diff --git a/owl-bot-staging/v1/google/container/py.typed b/owl-bot-staging/v1/google/container/py.typed new file mode 100644 index 00000000..fd835114 --- /dev/null +++ b/owl-bot-staging/v1/google/container/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-container package uses inline types. diff --git a/owl-bot-staging/v1/google/container_v1/__init__.py b/owl-bot-staging/v1/google/container_v1/__init__.py new file mode 100644 index 00000000..e9efa5ea --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/__init__.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .services.cluster_manager import ClusterManagerClient +from .services.cluster_manager import ClusterManagerAsyncClient + +from .types.cluster_service import AcceleratorConfig +from .types.cluster_service import AddonsConfig +from .types.cluster_service import AuthenticatorGroupsConfig +from .types.cluster_service import AutoprovisioningNodePoolDefaults +from .types.cluster_service import AutoUpgradeOptions +from .types.cluster_service import BinaryAuthorization +from .types.cluster_service import CancelOperationRequest +from .types.cluster_service import ClientCertificateConfig +from .types.cluster_service import CloudRunConfig +from .types.cluster_service import Cluster +from .types.cluster_service import ClusterAutoscaling +from .types.cluster_service import ClusterUpdate +from .types.cluster_service import CompleteIPRotationRequest +from .types.cluster_service import ConfigConnectorConfig +from .types.cluster_service import CreateClusterRequest +from .types.cluster_service import CreateNodePoolRequest +from .types.cluster_service import DailyMaintenanceWindow +from .types.cluster_service import DatabaseEncryption +from .types.cluster_service import DefaultSnatStatus +from .types.cluster_service import DeleteClusterRequest +from .types.cluster_service import DeleteNodePoolRequest +from .types.cluster_service import DnsCacheConfig +from .types.cluster_service import GetClusterRequest +from .types.cluster_service import GetJSONWebKeysRequest +from .types.cluster_service import GetJSONWebKeysResponse +from .types.cluster_service import GetNodePoolRequest +from .types.cluster_service import GetOpenIDConfigRequest +from .types.cluster_service import GetOpenIDConfigResponse +from .types.cluster_service import GetOperationRequest +from .types.cluster_service import GetServerConfigRequest +from .types.cluster_service import HorizontalPodAutoscaling +from .types.cluster_service import HttpLoadBalancing +from .types.cluster_service import IntraNodeVisibilityConfig +from .types.cluster_service import IPAllocationPolicy +from .types.cluster_service import Jwk +from .types.cluster_service import KubernetesDashboard +from .types.cluster_service import LegacyAbac +from .types.cluster_service import ListClustersRequest +from .types.cluster_service import ListClustersResponse +from .types.cluster_service import ListNodePoolsRequest +from .types.cluster_service import ListNodePoolsResponse +from .types.cluster_service import ListOperationsRequest +from .types.cluster_service import ListOperationsResponse +from .types.cluster_service import ListUsableSubnetworksRequest +from .types.cluster_service import ListUsableSubnetworksResponse +from .types.cluster_service import MaintenancePolicy +from .types.cluster_service import MaintenanceWindow +from .types.cluster_service import MasterAuth +from .types.cluster_service import MasterAuthorizedNetworksConfig +from .types.cluster_service import MaxPodsConstraint +from .types.cluster_service import NetworkConfig +from .types.cluster_service import NetworkPolicy +from .types.cluster_service import NetworkPolicyConfig +from .types.cluster_service 
import NodeConfig +from .types.cluster_service import NodeManagement +from .types.cluster_service import NodePool +from .types.cluster_service import NodePoolAutoscaling +from .types.cluster_service import NodeTaint +from .types.cluster_service import Operation +from .types.cluster_service import OperationProgress +from .types.cluster_service import PrivateClusterConfig +from .types.cluster_service import PrivateClusterMasterGlobalAccessConfig +from .types.cluster_service import RecurringTimeWindow +from .types.cluster_service import ReleaseChannel +from .types.cluster_service import ReservationAffinity +from .types.cluster_service import ResourceLimit +from .types.cluster_service import ResourceUsageExportConfig +from .types.cluster_service import RollbackNodePoolUpgradeRequest +from .types.cluster_service import SandboxConfig +from .types.cluster_service import ServerConfig +from .types.cluster_service import SetAddonsConfigRequest +from .types.cluster_service import SetLabelsRequest +from .types.cluster_service import SetLegacyAbacRequest +from .types.cluster_service import SetLocationsRequest +from .types.cluster_service import SetLoggingServiceRequest +from .types.cluster_service import SetMaintenancePolicyRequest +from .types.cluster_service import SetMasterAuthRequest +from .types.cluster_service import SetMonitoringServiceRequest +from .types.cluster_service import SetNetworkPolicyRequest +from .types.cluster_service import SetNodePoolAutoscalingRequest +from .types.cluster_service import SetNodePoolManagementRequest +from .types.cluster_service import SetNodePoolSizeRequest +from .types.cluster_service import ShieldedInstanceConfig +from .types.cluster_service import ShieldedNodes +from .types.cluster_service import StartIPRotationRequest +from .types.cluster_service import StatusCondition +from .types.cluster_service import TimeWindow +from .types.cluster_service import UpdateClusterRequest +from .types.cluster_service import UpdateMasterRequest +from .types.cluster_service import UpdateNodePoolRequest +from .types.cluster_service import UsableSubnetwork +from .types.cluster_service import UsableSubnetworkSecondaryRange +from .types.cluster_service import VerticalPodAutoscaling +from .types.cluster_service import WorkloadIdentityConfig +from .types.cluster_service import WorkloadMetadataConfig + +__all__ = ( + 'ClusterManagerAsyncClient', +'AcceleratorConfig', +'AddonsConfig', +'AuthenticatorGroupsConfig', +'AutoUpgradeOptions', +'AutoprovisioningNodePoolDefaults', +'BinaryAuthorization', +'CancelOperationRequest', +'ClientCertificateConfig', +'CloudRunConfig', +'Cluster', +'ClusterAutoscaling', +'ClusterManagerClient', +'ClusterUpdate', +'CompleteIPRotationRequest', +'ConfigConnectorConfig', +'CreateClusterRequest', +'CreateNodePoolRequest', +'DailyMaintenanceWindow', +'DatabaseEncryption', +'DefaultSnatStatus', +'DeleteClusterRequest', +'DeleteNodePoolRequest', +'DnsCacheConfig', +'GetClusterRequest', +'GetJSONWebKeysRequest', +'GetJSONWebKeysResponse', +'GetNodePoolRequest', +'GetOpenIDConfigRequest', +'GetOpenIDConfigResponse', +'GetOperationRequest', +'GetServerConfigRequest', +'HorizontalPodAutoscaling', +'HttpLoadBalancing', +'IPAllocationPolicy', +'IntraNodeVisibilityConfig', +'Jwk', +'KubernetesDashboard', +'LegacyAbac', +'ListClustersRequest', +'ListClustersResponse', +'ListNodePoolsRequest', +'ListNodePoolsResponse', +'ListOperationsRequest', +'ListOperationsResponse', +'ListUsableSubnetworksRequest', +'ListUsableSubnetworksResponse', +'MaintenancePolicy', 
+'MaintenanceWindow', +'MasterAuth', +'MasterAuthorizedNetworksConfig', +'MaxPodsConstraint', +'NetworkConfig', +'NetworkPolicy', +'NetworkPolicyConfig', +'NodeConfig', +'NodeManagement', +'NodePool', +'NodePoolAutoscaling', +'NodeTaint', +'Operation', +'OperationProgress', +'PrivateClusterConfig', +'PrivateClusterMasterGlobalAccessConfig', +'RecurringTimeWindow', +'ReleaseChannel', +'ReservationAffinity', +'ResourceLimit', +'ResourceUsageExportConfig', +'RollbackNodePoolUpgradeRequest', +'SandboxConfig', +'ServerConfig', +'SetAddonsConfigRequest', +'SetLabelsRequest', +'SetLegacyAbacRequest', +'SetLocationsRequest', +'SetLoggingServiceRequest', +'SetMaintenancePolicyRequest', +'SetMasterAuthRequest', +'SetMonitoringServiceRequest', +'SetNetworkPolicyRequest', +'SetNodePoolAutoscalingRequest', +'SetNodePoolManagementRequest', +'SetNodePoolSizeRequest', +'ShieldedInstanceConfig', +'ShieldedNodes', +'StartIPRotationRequest', +'StatusCondition', +'TimeWindow', +'UpdateClusterRequest', +'UpdateMasterRequest', +'UpdateNodePoolRequest', +'UsableSubnetwork', +'UsableSubnetworkSecondaryRange', +'VerticalPodAutoscaling', +'WorkloadIdentityConfig', +'WorkloadMetadataConfig', +) diff --git a/owl-bot-staging/v1/google/container_v1/gapic_metadata.json b/owl-bot-staging/v1/google/container_v1/gapic_metadata.json new file mode 100644 index 00000000..1638f865 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/gapic_metadata.json @@ -0,0 +1,343 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.container_v1", + "protoPackage": "google.container.v1", + "schema": "1.0", + "services": { + "ClusterManager": { + "clients": { + "grpc": { + "libraryClient": "ClusterManagerClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "cancel_operation" + ] + }, + "CompleteIPRotation": { + "methods": [ + "complete_ip_rotation" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateNodePool": { + "methods": [ + "create_node_pool" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteNodePool": { + "methods": [ + "delete_node_pool" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetJSONWebKeys": { + "methods": [ + "get_json_web_keys" + ] + }, + "GetNodePool": { + "methods": [ + "get_node_pool" + ] + }, + "GetOperation": { + "methods": [ + "get_operation" + ] + }, + "GetServerConfig": { + "methods": [ + "get_server_config" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListNodePools": { + "methods": [ + "list_node_pools" + ] + }, + "ListOperations": { + "methods": [ + "list_operations" + ] + }, + "ListUsableSubnetworks": { + "methods": [ + "list_usable_subnetworks" + ] + }, + "RollbackNodePoolUpgrade": { + "methods": [ + "rollback_node_pool_upgrade" + ] + }, + "SetAddonsConfig": { + "methods": [ + "set_addons_config" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetLegacyAbac": { + "methods": [ + "set_legacy_abac" + ] + }, + "SetLocations": { + "methods": [ + "set_locations" + ] + }, + "SetLoggingService": { + "methods": [ + "set_logging_service" + ] + }, + "SetMaintenancePolicy": { + "methods": [ + "set_maintenance_policy" + ] + }, + "SetMasterAuth": { + "methods": [ + "set_master_auth" + ] + }, + "SetMonitoringService": { + "methods": [ + "set_monitoring_service" + ] + }, + "SetNetworkPolicy": { + "methods": [ + "set_network_policy" + ] + }, + "SetNodePoolAutoscaling": { + "methods": 
[ + "set_node_pool_autoscaling" + ] + }, + "SetNodePoolManagement": { + "methods": [ + "set_node_pool_management" + ] + }, + "SetNodePoolSize": { + "methods": [ + "set_node_pool_size" + ] + }, + "StartIPRotation": { + "methods": [ + "start_ip_rotation" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateMaster": { + "methods": [ + "update_master" + ] + }, + "UpdateNodePool": { + "methods": [ + "update_node_pool" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ClusterManagerAsyncClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "cancel_operation" + ] + }, + "CompleteIPRotation": { + "methods": [ + "complete_ip_rotation" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateNodePool": { + "methods": [ + "create_node_pool" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteNodePool": { + "methods": [ + "delete_node_pool" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetJSONWebKeys": { + "methods": [ + "get_json_web_keys" + ] + }, + "GetNodePool": { + "methods": [ + "get_node_pool" + ] + }, + "GetOperation": { + "methods": [ + "get_operation" + ] + }, + "GetServerConfig": { + "methods": [ + "get_server_config" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListNodePools": { + "methods": [ + "list_node_pools" + ] + }, + "ListOperations": { + "methods": [ + "list_operations" + ] + }, + "ListUsableSubnetworks": { + "methods": [ + "list_usable_subnetworks" + ] + }, + "RollbackNodePoolUpgrade": { + "methods": [ + "rollback_node_pool_upgrade" + ] + }, + "SetAddonsConfig": { + "methods": [ + "set_addons_config" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetLegacyAbac": { + "methods": [ + "set_legacy_abac" + ] + }, + "SetLocations": { + "methods": [ + "set_locations" + ] + }, + "SetLoggingService": { + "methods": [ + "set_logging_service" + ] + }, + "SetMaintenancePolicy": { + "methods": [ + "set_maintenance_policy" + ] + }, + "SetMasterAuth": { + "methods": [ + "set_master_auth" + ] + }, + "SetMonitoringService": { + "methods": [ + "set_monitoring_service" + ] + }, + "SetNetworkPolicy": { + "methods": [ + "set_network_policy" + ] + }, + "SetNodePoolAutoscaling": { + "methods": [ + "set_node_pool_autoscaling" + ] + }, + "SetNodePoolManagement": { + "methods": [ + "set_node_pool_management" + ] + }, + "SetNodePoolSize": { + "methods": [ + "set_node_pool_size" + ] + }, + "StartIPRotation": { + "methods": [ + "start_ip_rotation" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateMaster": { + "methods": [ + "update_master" + ] + }, + "UpdateNodePool": { + "methods": [ + "update_node_pool" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/google/container_v1/py.typed b/owl-bot-staging/v1/google/container_v1/py.typed new file mode 100644 index 00000000..fd835114 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-container package uses inline types. diff --git a/owl-bot-staging/v1/google/container_v1/services/__init__.py b/owl-bot-staging/v1/google/container_v1/services/__init__.py new file mode 100644 index 00000000..4de65971 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py new file mode 100644 index 00000000..490efad3 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ClusterManagerClient +from .async_client import ClusterManagerAsyncClient + +__all__ = ( + 'ClusterManagerClient', + 'ClusterManagerAsyncClient', +) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py new file mode 100644 index 00000000..b53572ad --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py @@ -0,0 +1,3604 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources +import warnings + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.container_v1.services.cluster_manager import pagers +from google.container_v1.types import cluster_service +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport +from .client import ClusterManagerClient + + +class ClusterManagerAsyncClient: + """Google Kubernetes Engine Cluster Manager v1""" + + _client: ClusterManagerClient + + DEFAULT_ENDPOINT = ClusterManagerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ClusterManagerClient.DEFAULT_MTLS_ENDPOINT + + common_billing_account_path = staticmethod(ClusterManagerClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ClusterManagerClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ClusterManagerClient.common_folder_path) + parse_common_folder_path = staticmethod(ClusterManagerClient.parse_common_folder_path) + common_organization_path = staticmethod(ClusterManagerClient.common_organization_path) + parse_common_organization_path = staticmethod(ClusterManagerClient.parse_common_organization_path) + common_project_path = staticmethod(ClusterManagerClient.common_project_path) + parse_common_project_path = staticmethod(ClusterManagerClient.parse_common_project_path) + common_location_path = staticmethod(ClusterManagerClient.common_location_path) + parse_common_location_path = staticmethod(ClusterManagerClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterManagerAsyncClient: The constructed client. + """ + return ClusterManagerClient.from_service_account_info.__func__(ClusterManagerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterManagerAsyncClient: The constructed client. + """ + return ClusterManagerClient.from_service_account_file.__func__(ClusterManagerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterManagerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterManagerTransport: The transport used by the client instance. 
+ """ + return self._client.transport + + get_transport_class = functools.partial(type(ClusterManagerClient).get_transport_class, type(ClusterManagerClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ClusterManagerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ClusterManagerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def list_clusters(self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (:class:`google.container_v1.types.ListClustersRequest`): + The request object. ListClustersRequest lists clusters. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project and location) where the clusters + will be listed. Specified in the format + ``projects/*/locations/*``. 
Location "-" matches all + zones and all regions. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_cluster(self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details of a specific cluster. + + Args: + request (:class:`google.container_v1.types.GetClusterRequest`): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to retrieve. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ name (:class:`str`): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_cluster(self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (:class:`google.container_v1.types.CreateClusterRequest`): + The request object. CreateClusterRequest creates a + cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. 
+ + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.container_v1.types.Cluster`): + Required. A `cluster + resource `__ + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project and location) where the cluster will + be created. Specified in the format + ``projects/*/locations/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster, parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_cluster(self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings of a specific cluster. + + Args: + request (:class:`google.container_v1.types.UpdateClusterRequest`): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. 
+ This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (:class:`google.container_v1.types.ClusterUpdate`): + Required. A description of the + update. + + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, update, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_node_pool(self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type for the + specified node pool. 
+ + Args: + request (:class:`google.container_v1.types.UpdateNodePoolRequest`): + The request object. UpdateNodePoolRequests update a node + pool's image and/or version. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_node_pool, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_node_pool_autoscaling(self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings for the specified node + pool. + + Args: + request (:class:`google.container_v1.types.SetNodePoolAutoscalingRequest`): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_autoscaling, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_logging_service(self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. 
+ + Args: + request (:class:`google.container_v1.types.SetLoggingServiceRequest`): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (:class:`str`): + Required. The logging service the cluster should use to + write logs. Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud + Logging service with a Kubernetes-native resource + model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``logging.googleapis.com`` for + earlier versions. + + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, logging_service, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
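+        # Editor's note: ``wrap_method`` layers the service's default timeout
+        # (and any default retry policy) over the raw transport stub; explicit
+        # ``retry``/``timeout`` arguments passed to this call still take
+        # precedence. Illustrative usage of this method (placeholder names,
+        # not generator output):
+        #
+        #     op = await client.set_logging_service(
+        #         name="projects/my-project/locations/us-central1-a/clusters/my-cluster",
+        #         logging_service="logging.googleapis.com/kubernetes",
+        #     )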
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_logging_service, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_monitoring_service(self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (:class:`google.container_v1.types.SetMonitoringServiceRequest`): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (:class:`str`): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE + 1.15). + - ``none`` - No metrics will be exported from the + cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will + be used for GKE 1.14+ or ``monitoring.googleapis.com`` + for earlier versions. + + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, monitoring_service, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_monitoring_service, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_addons_config(self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (:class:`google.container_v1.types.SetAddonsConfigRequest`): + The request object. SetAddonsConfigRequest sets the + addons associated with the cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (:class:`google.container_v1.types.AddonsConfig`): + Required. The desired configurations + for the various addons available to run + in the cluster. + + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set addons. Specified in the format + ``projects/*/locations/*/clusters/*``. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, addons_config, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_addons_config, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_locations(self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. Deprecated. Use + `projects.locations.clusters.update `__ + instead. + + Args: + request (:class:`google.container_v1.types.SetLocationsRequest`): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. 
+ + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (:class:`Sequence[str]`): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + set locations. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + warnings.warn("ClusterManagerAsyncClient.set_locations is deprecated", + DeprecationWarning) + + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, locations, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + if locations: + request.locations.extend(locations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_locations, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_master(self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (:class:`google.container_v1.types.UpdateMasterRequest`): + The request object. UpdateMasterRequest updates the + master of the cluster. 
+            project_id (:class:`str`):
+                Deprecated. The Google Developers Console `project ID or
+                project
+                number `__.
+                This field has been deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (:class:`str`):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the name field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (:class:`str`):
+                Deprecated. The name of the cluster
+                to upgrade. This field has been
+                deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            master_version (:class:`str`):
+                Required. The Kubernetes version to
+                change the master to.
+
+                Users may specify either explicit versions offered by
+                Kubernetes Engine or version aliases, which have the
+                following behavior:
+
+                - "latest": picks the highest valid Kubernetes version
+                - "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+                - "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+                - "1.X.Y-gke.N": picks an explicit Kubernetes version
+                - "-": picks the default Kubernetes version
+
+                This corresponds to the ``master_version`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            name (:class:`str`):
+                The name (project, location, cluster) of the cluster to
+                update. Specified in the format
+                ``projects/*/locations/*/clusters/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, master_version, name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = cluster_service.UpdateMasterRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project_id is not None:
+            request.project_id = project_id
+        if zone is not None:
+            request.zone = zone
+        if cluster_id is not None:
+            request.cluster_id = cluster_id
+        if master_version is not None:
+            request.master_version = master_version
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_master,
+            default_timeout=45.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
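+        # Editor's note: ``to_grpc_metadata`` emits the standard
+        # ``x-goog-request-params`` header (for example,
+        # ``name=projects/my-project/locations/us-central1-a/clusters/my-cluster``),
+        # which the backend uses to route the request to the named resource.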
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def set_master_auth(self,
+            request: cluster_service.SetMasterAuthRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Sets master auth materials. Currently supports
+        changing the admin password of a specific cluster,
+        either via password generation or explicitly setting the
+        password.
+
+        Args:
+            request (:class:`google.container_v1.types.SetMasterAuthRequest`):
+                The request object. SetMasterAuthRequest updates the
+                admin password of a cluster.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        request = cluster_service.SetMasterAuthRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_master_auth,
+            default_timeout=45.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def delete_cluster(self,
+            request: cluster_service.DeleteClusterRequest = None,
+            *,
+            project_id: str = None,
+            zone: str = None,
+            cluster_id: str = None,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Deletes the cluster, including the Kubernetes
+        endpoint and all worker nodes.
+
+        Firewalls and routes that were configured during cluster
+        creation are also deleted.
+
+        Other Google Compute Engine resources that might be in
+        use by the cluster, such as load balancer resources, are
+        not deleted if they weren't present when the cluster was
+        initially created.
+
+        Args:
+            request (:class:`google.container_v1.types.DeleteClusterRequest`):
+                The request object. DeleteClusterRequest deletes a
+                cluster.
+            project_id (:class:`str`):
+                Deprecated. The Google Developers Console `project ID or
+                project
+                number `__.
+                This field has been deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (:class:`str`):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the name field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+ cluster_id (:class:`str`): + Deprecated. The name of the cluster + to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations(self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in a specific zone + or all zones. + + Args: + request (:class:`google.container_v1.types.ListOperationsRequest`): + The request object. ListOperationsRequest lists + operations. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. 
The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_operation(self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (:class:`google.container_v1.types.GetOperationRequest`): + The request object. GetOperationRequest gets a single + operation. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. 
+ + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, operation id) of the + operation to get. Specified in the format + ``projects/*/locations/*/operations/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def cancel_operation(self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (:class:`google.container_v1.types.CancelOperationRequest`): + The request object. CancelOperationRequest cancels a + single operation. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, operation id) of the + operation to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def get_server_config(self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (:class:`google.container_v1.types.GetServerConfigRequest`): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+            name (:class:`str`):
+                The name (project and location) of the server config to
+                get, specified in the format ``projects/*/locations/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.ServerConfig:
+                Kubernetes Engine service
+                configuration.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = cluster_service.GetServerConfigRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project_id is not None:
+            request.project_id = project_id
+        if zone is not None:
+            request.zone = zone
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_server_config,
+            default_retry=retries.Retry(
+                initial=0.1,
+                maximum=60.0,
+                multiplier=1.3,
+                predicate=retries.if_exception_type(
+                    core_exceptions.DeadlineExceeded,
+                    core_exceptions.ServiceUnavailable,
+                ),
+                deadline=20.0,
+            ),
+            default_timeout=20.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def get_json_web_keys(self,
+            request: cluster_service.GetJSONWebKeysRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.GetJSONWebKeysResponse:
+        r"""Gets the public component of the cluster signing keys
+        in JSON Web Key format.
+        This API is not yet intended for general use, and is not
+        available for all clusters.
+
+        Args:
+            request (:class:`google.container_v1.types.GetJSONWebKeysRequest`):
+                The request object. GetJSONWebKeysRequest gets the
+                public component of the keys used by the cluster to sign
+                token requests. This will be the jwks_uri for the
+                discovery document returned by getOpenIDConfig. See the
+                OpenID Connect Discovery 1.0 specification for details.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.GetJSONWebKeysResponse:
+                GetJSONWebKeysResponse is a valid
+                JSON Web Key Set as specified in RFC
+                7517
+
+        """
+        # Create or coerce a protobuf request object.
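+        # The proto-plus constructor below accepts None (an empty request), a
+        # dict, or an existing GetJSONWebKeysRequest; a minimal sketch with a
+        # hypothetical parent:
+        #   cluster_service.GetJSONWebKeysRequest(
+        #       {"parent": "projects/my-proj/locations/us-central1-a/clusters/my-cluster"}
+        #   )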
+ request = cluster_service.GetJSONWebKeysRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_json_web_keys, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_node_pools(self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. + + Args: + request (:class:`google.container_v1.types.ListNodePoolsRequest`): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project, location, cluster id) where the + node pools will be listed. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.ListNodePoolsResponse: + ListNodePoolsResponse is the result + of ListNodePoolsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
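+        # As an illustration, the flattened form of this call (the resource
+        # name is hypothetical):
+        #   response = await client.list_node_pools(
+        #       parent="projects/my-proj/locations/us-central1-a/clusters/my-cluster",
+        #   )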
+ if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_node_pools, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_node_pool(self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (:class:`google.container_v1.types.GetNodePoolRequest`): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node + pool. This field has been deprecated and + replaced by the name field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. + Node pools are a set of nodes (i.e. 
+ VM's), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_node_pool(self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (:class:`google.container_v1.types.CreateNodePoolRequest`): + The request object. CreateNodePoolRequest creates a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (:class:`google.container_v1.types.NodePool`): + Required. 
The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (:class:`str`): + The parent (project, location, cluster id) where the + node pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_node_pool, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_node_pool(self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (:class:`google.container_v1.types.DeleteNodePoolRequest`): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Deprecated. The name of the node pool + to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster, node pool id) of + the node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
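+        # The delete is asynchronous on the server; a caller could poll the
+        # returned Operation via get_operation, e.g. (names hypothetical):
+        #   op = await client.delete_node_pool(name=pool_name)
+        #   op = await client.get_operation(
+        #       name=f"projects/my-proj/locations/us-central1-a/operations/{op.name}",
+        #   )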
+        return response
+
+    async def rollback_node_pool_upgrade(self,
+            request: cluster_service.RollbackNodePoolUpgradeRequest = None,
+            *,
+            project_id: str = None,
+            zone: str = None,
+            cluster_id: str = None,
+            node_pool_id: str = None,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Rolls back a previously Aborted or Failed NodePool
+        upgrade. This makes no changes if the last upgrade
+        successfully completed.
+
+        Args:
+            request (:class:`google.container_v1.types.RollbackNodePoolUpgradeRequest`):
+                The request object. RollbackNodePoolUpgradeRequest
+                rolls back the previously Aborted or Failed NodePool
+                upgrade. This will be a no-op if the last upgrade
+                successfully completed.
+            project_id (:class:`str`):
+                Deprecated. The Google Developers Console `project ID or
+                project
+                number `__.
+                This field has been deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (:class:`str`):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the name field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (:class:`str`):
+                Deprecated. The name of the cluster
+                to rollback. This field has been
+                deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            node_pool_id (:class:`str`):
+                Deprecated. The name of the node pool
+                to rollback. This field has been
+                deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``node_pool_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            name (:class:`str`):
+                The name (project, location, cluster, node pool id) of
+                the node pool to rollback upgrade. Specified in the
+                format
+                ``projects/*/locations/*/clusters/*/nodePools/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = cluster_service.RollbackNodePoolUpgradeRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
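+        # In the name-based style only `name` is needed; a sketch with a
+        # hypothetical resource name:
+        #   await client.rollback_node_pool_upgrade(
+        #       name="projects/my-proj/locations/us-central1-a/clusters/c/nodePools/np",
+        #   )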
+ if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.rollback_node_pool_upgrade, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_node_pool_management(self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (:class:`google.container_v1.types.SetNodePoolManagementRequest`): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.SetNodePoolManagementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_management, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_labels(self, + request: cluster_service.SetLabelsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (:class:`google.container_v1.types.SetLabelsRequest`): + The request object. SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.SetLabelsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_labels, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_legacy_abac(self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (:class:`google.container_v1.types.SetLegacyAbacRequest`): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster + to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (:class:`bool`): + Required. Whether ABAC authorization + will be enabled in the cluster. + + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
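+        # The request can also be built explicitly before calling; a minimal
+        # sketch with hypothetical values:
+        #   request = cluster_service.SetLegacyAbacRequest(
+        #       name="projects/my-proj/locations/us-central1-a/clusters/my-cluster",
+        #       enabled=True,
+        #   )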
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, enabled, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_legacy_abac, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def start_ip_rotation(self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (:class:`google.container_v1.types.StartIPRotationRequest`): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_ip_rotation, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def complete_ip_rotation(self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (:class:`google.container_v1.types.CompleteIPRotationRequest`): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, name])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = cluster_service.CompleteIPRotationRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project_id is not None:
+            request.project_id = project_id
+        if zone is not None:
+            request.zone = zone
+        if cluster_id is not None:
+            request.cluster_id = cluster_id
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.complete_ip_rotation,
+            default_timeout=45.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def set_node_pool_size(self,
+            request: cluster_service.SetNodePoolSizeRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Sets the size for a specific node pool.
+
+        Args:
+            request (:class:`google.container_v1.types.SetNodePoolSizeRequest`):
+                The request object. SetNodePoolSizeRequest sets the size
+                of a node pool.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        request = cluster_service.SetNodePoolSizeRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_node_pool_size,
+            default_timeout=45.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
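+        # Resizing is long-running; if necessary the operation can be cancelled
+        # through this same client (the full operation name below is assembled
+        # from hypothetical parts):
+        #   await client.cancel_operation(
+        #       name=f"projects/my-proj/locations/us-central1-a/operations/{response.name}",
+        #   )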
+ return response + + async def set_network_policy(self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (:class:`google.container_v1.types.SetNetworkPolicyRequest`): + The request object. SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (:class:`str`): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (:class:`google.container_v1.types.NetworkPolicy`): + Required. Configuration options for + the NetworkPolicy feature. + + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, network_policy, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
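+        # The wrapper applies the `default_timeout` below unless the caller
+        # overrides it per invocation, e.g. (value illustrative):
+        #   await client.set_network_policy(request=request, timeout=60.0)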
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_network_policy, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_maintenance_policy(self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (:class:`google.container_v1.types.SetMaintenancePolicyRequest`): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (:class:`str`): + Required. The Google Developers Console `project ID or + project + number `__. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The name of the cluster to + update. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (:class:`google.container_v1.types.MaintenancePolicy`): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (:class:`str`): + The name (project, location, cluster id) of the cluster + to set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
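+        # For instance, mixing the two styles raises ValueError (arguments
+        # hypothetical):
+        #   await client.set_maintenance_policy(
+        #       request=cluster_service.SetMaintenancePolicyRequest(),
+        #       cluster_id="my-cluster",
+        #   )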
+ has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy, name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_maintenance_policy, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_usable_subnetworks(self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksAsyncPager: + r"""Lists subnetworks that are usable for creating + clusters in a project. + + Args: + request (:class:`google.container_v1.types.ListUsableSubnetworksRequest`): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks available to a + user for creating clusters. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.services.cluster_manager.pagers.ListUsableSubnetworksAsyncPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.ListUsableSubnetworksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_usable_subnetworks, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListUsableSubnetworksAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-container", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterManagerAsyncClient", +) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py new file mode 100644 index 00000000..2999b112 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py @@ -0,0 +1,3731 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from distutils import util +import os +import re +from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources +import warnings + +from google.api_core import client_options as client_options_lib # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.container_v1.services.cluster_manager import pagers +from google.container_v1.types import cluster_service +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import ClusterManagerGrpcTransport +from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +class ClusterManagerClientMeta(type): + """Metaclass for the ClusterManager client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] + _transport_registry["grpc"] = ClusterManagerGrpcTransport + _transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ClusterManagerTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
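+        # A minimal sketch of both paths, using the labels registered above:
+        #   ClusterManagerClient.get_transport_class("grpc_asyncio")
+        #       -> ClusterManagerGrpcAsyncIOTransport
+        #   ClusterManagerClient.get_transport_class()
+        #       -> ClusterManagerGrpcTransport (first entry, "grpc")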
+        return next(iter(cls._transport_registry.values()))
+
+
+class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
+    """Google Kubernetes Engine Cluster Manager v1"""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "container.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ClusterManagerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ClusterManagerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ClusterManagerTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            ClusterManagerTransport: The transport used by the client
+                instance.
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ClusterManagerTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). 
However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterManagerTransport): + # transport is a ClusterManagerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), + ) + + def list_clusters(self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (google.container_v1.types.ListClustersRequest): + The request object. ListClustersRequest lists clusters. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (str): + The parent (project and location) where the clusters + will be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all + zones and all regions. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListClustersRequest): + request = cluster_service.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
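+        # Usage sketch (hypothetical project value, not part of the generated
+        # surface): the flattened arguments above and the ``request`` object
+        # are mutually exclusive, so either
+        #
+        #     client.list_clusters(parent="projects/my-project/locations/-")
+        #
+        # or
+        #
+        #     request = cluster_service.ListClustersRequest(
+        #         parent="projects/my-project/locations/-")
+        #     client.list_clusters(request=request)
+        #
+        # reaches this point, but not a mix of both.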
+ rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_cluster(self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details of a specific cluster. + + Args: + request (google.container_v1.types.GetClusterRequest): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to retrieve. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetClusterRequest): + request = cluster_service.GetClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
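+        # For illustration (hypothetical identifiers): a call using the
+        # recommended ``name`` form arrives here as
+        #
+        #     client.get_cluster(
+        #         name="projects/my-project/locations/us-central1-a/clusters/my-cluster")
+        #
+        # and only ``request.name`` is populated below.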
+ if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_cluster(self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (google.container_v1.types.CreateClusterRequest): + The request object. CreateClusterRequest creates a + cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.container_v1.types.Cluster): + Required. A `cluster + resource `__ + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (str): + The parent (project and location) where the cluster will + be created. Specified in the format + ``projects/*/locations/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
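+        # A minimal creation sketch (hypothetical values; ``name`` and
+        # ``initial_node_count`` are fields of cluster_service.Cluster):
+        #
+        #     cluster = cluster_service.Cluster(
+        #         name="my-cluster", initial_node_count=3)
+        #     operation = client.create_cluster(
+        #         parent="projects/my-project/locations/us-central1-a",
+        #         cluster=cluster)
+        #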
+ has_flattened_params = any([project_id, zone, cluster, parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateClusterRequest): + request = cluster_service.CreateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_cluster(self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings of a specific cluster. + + Args: + request (google.container_v1.types.UpdateClusterRequest): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (google.container_v1.types.ClusterUpdate): + Required. A description of the + update. + + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, update, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateClusterRequest): + request = cluster_service.UpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_node_pool(self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type for the + specified node pool. + + Args: + request (google.container_v1.types.UpdateNodePoolRequest): + The request object. UpdateNodePoolRequests update a node + pool's image and/or version. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateNodePoolRequest): + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_node_pool] + + # Certain fields should be provided within the metadata header; + # add these here. 
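+        # The routing header attached below carries the resource name so the
+        # backend can route the call; as a sketch (hypothetical name), it
+        # serializes to something like:
+        #
+        #     ("x-goog-request-params",
+        #      "name=projects/my-project/locations/us-central1-a/clusters/c/nodePools/np")
+        #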
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_node_pool_autoscaling(self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings for the specified node + pool. + + Args: + request (google.container_v1.types.SetNodePoolAutoscalingRequest): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolAutoscalingRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest): + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_autoscaling] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_logging_service(self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (google.container_v1.types.SetLoggingServiceRequest): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. 
+ + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (str): + Required. The logging service the cluster should use to + write logs. Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud + Logging service with a Kubernetes-native resource + model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``logging.googleapis.com`` for + earlier versions. + + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + set logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, logging_service, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLoggingServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLoggingServiceRequest): + request = cluster_service.SetLoggingServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_logging_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
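+        # The response is a cluster_service.Operation describing a
+        # long-running change; as a sketch (hypothetical names), callers can
+        # poll it until completion:
+        #
+        #     op = client.set_logging_service(...)
+        #     client.get_operation(
+        #         name="projects/my-project/locations/us-central1-a"
+        #              "/operations/" + op.name)
+        #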
+ return response + + def set_monitoring_service(self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (google.container_v1.types.SetMonitoringServiceRequest): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (str): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE + 1.15). + - ``none`` - No metrics will be exported from the + cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will + be used for GKE 1.14+ or ``monitoring.googleapis.com`` + for earlier versions. + + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + set monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, monitoring_service, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMonitoringServiceRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMonitoringServiceRequest): + request = cluster_service.SetMonitoringServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_addons_config(self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (google.container_v1.types.SetAddonsConfigRequest): + The request object. SetAddonsConfigRequest sets the + addons associated with the cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (google.container_v1.types.AddonsConfig): + Required. The desired configurations + for the various addons available to run + in the cluster. + + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + set addons. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, addons_config, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetAddonsConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetAddonsConfigRequest): + request = cluster_service.SetAddonsConfigRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_addons_config] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_locations(self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. Deprecated. Use + `projects.locations.clusters.update `__ + instead. + + Args: + request (google.container_v1.types.SetLocationsRequest): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (Sequence[str]): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. 
Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + set locations. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + warnings.warn("ClusterManagerClient.set_locations is deprecated", + DeprecationWarning) + + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, locations, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLocationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLocationsRequest): + request = cluster_service.SetLocationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_master(self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (google.container_v1.types.UpdateMasterRequest): + The request object. UpdateMasterRequest updates the + master of the cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. 
+                This field has been deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (str):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the name field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (str):
+                Deprecated. The name of the cluster
+                to upgrade. This field has been
+                deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            master_version (str):
+                Required. The Kubernetes version to change the master to.
+
+                Users may specify either explicit versions offered by
+                Kubernetes Engine or version aliases, which have the
+                following behavior:
+
+                -  "latest": picks the highest valid Kubernetes version
+                -  "1.X": picks the highest valid patch+gke.N patch in the 1.X version
+                -  "1.X.Y": picks the highest valid gke.N patch in the 1.X.Y version
+                -  "1.X.Y-gke.N": picks an explicit Kubernetes version
+                -  "-": picks the default Kubernetes version
+
+                This corresponds to the ``master_version`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            name (str):
+                The name (project, location, cluster) of the cluster to
+                update. Specified in the format
+                ``projects/*/locations/*/clusters/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, master_version, name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cluster_service.UpdateMasterRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, cluster_service.UpdateMasterRequest):
+            request = cluster_service.UpdateMasterRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project_id is not None:
+            request.project_id = project_id
+        if zone is not None:
+            request.zone = zone
+        if cluster_id is not None:
+            request.cluster_id = cluster_id
+        if master_version is not None:
+            request.master_version = master_version
+        if name is not None:
+            request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
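+        # Sketch (hypothetical names): upgrading the control plane to the
+        # default version uses the "-" alias described above:
+        #
+        #     client.update_master(
+        #         name="projects/my-project/locations/us-central1-a/clusters/my-cluster",
+        #         master_version="-")
+        #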
+ rpc = self._transport._wrapped_methods[self._transport.update_master] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_master_auth(self, + request: cluster_service.SetMasterAuthRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Args: + request (google.container_v1.types.SetMasterAuthRequest): + The request object. SetMasterAuthRequest updates the + admin password of a cluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMasterAuthRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMasterAuthRequest): + request = cluster_service.SetMasterAuthRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_master_auth] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_cluster(self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (google.container_v1.types.DeleteClusterRequest): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.DeleteClusterRequest): + request = cluster_service.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_operations(self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in a specific zone + or all zones. + + Args: + request (google.container_v1.types.ListOperationsRequest): + The request object. ListOperationsRequest lists + operations. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. 
+ This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListOperationsRequest): + request = cluster_service.ListOperationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation(self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (google.container_v1.types.GetOperationRequest): + The request object. GetOperationRequest gets a single + operation. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ operation_id (str): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, operation id) of the + operation to get. Specified in the format + ``projects/*/locations/*/operations/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetOperationRequest): + request = cluster_service.GetOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def cancel_operation(self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (google.container_v1.types.CancelOperationRequest): + The request object. CancelOperationRequest cancels a + single operation. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. 
This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (str): + Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced + by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, operation id) of the + operation to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CancelOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CancelOperationRequest): + request = cluster_service.CancelOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def get_server_config(self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (google.container_v1.types.GetServerConfigRequest): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. 
+                This field has been deprecated
+                and replaced by the name field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            name (str):
+                The name (project and location) of the server config to
+                get, specified in the format ``projects/*/locations/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.ServerConfig:
+                Kubernetes Engine service
+                configuration.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cluster_service.GetServerConfigRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, cluster_service.GetServerConfigRequest):
+            request = cluster_service.GetServerConfigRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if project_id is not None:
+                request.project_id = project_id
+            if zone is not None:
+                request.zone = zone
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_server_config]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def get_json_web_keys(self,
+            request: cluster_service.GetJSONWebKeysRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.GetJSONWebKeysResponse:
+        r"""Gets the public component of the cluster signing keys
+        in JSON Web Key format.
+        This API is not yet intended for general use, and is not
+        available for all clusters.
+
+        Args:
+            request (google.container_v1.types.GetJSONWebKeysRequest):
+                The request object. GetJSONWebKeysRequest gets the
+                public component of the keys used by the cluster to sign
+                token requests. This will be the jwks_uri for the
+                discovery document returned by getOpenIDConfig. See the
+                OpenID Connect Discovery 1.0 specification for details.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
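+
+        Example:
+            A minimal usage sketch, assuming ``client`` is an instantiated
+            ``ClusterManagerClient``; the parent path below is a hypothetical
+            placeholder::
+
+                keys = client.get_json_web_keys(
+                    request=cluster_service.GetJSONWebKeysRequest(
+                        parent="projects/my-project/locations/us-central1/clusters/my-cluster",
+                    )
+                )
+                for key in keys.keys:
+                    print(key.kid)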
+
+        Returns:
+            google.container_v1.types.GetJSONWebKeysResponse:
+                GetJSONWebKeysResponse is a valid
+                JSON Web Key Set as specified in
+                RFC 7517.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cluster_service.GetJSONWebKeysRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, cluster_service.GetJSONWebKeysRequest):
+            request = cluster_service.GetJSONWebKeysRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_json_web_keys]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def list_node_pools(self,
+            request: cluster_service.ListNodePoolsRequest = None,
+            *,
+            project_id: str = None,
+            zone: str = None,
+            cluster_id: str = None,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.ListNodePoolsResponse:
+        r"""Lists the node pools for a cluster.
+
+        Args:
+            request (google.container_v1.types.ListNodePoolsRequest):
+                The request object. ListNodePoolsRequest lists the node
+                pool(s) for a cluster.
+            project_id (str):
+                Deprecated. The Google Developers Console `project ID or
+                project
+                number `__.
+                This field has been deprecated and replaced by the
+                parent field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (str):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the parent field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (str):
+                Deprecated. The name of the cluster.
+                This field has been deprecated and
+                replaced by the parent field.
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            parent (str):
+                The parent (project, location, cluster id) where the
+                node pools will be listed. Specified in the format
+                ``projects/*/locations/*/clusters/*``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.ListNodePoolsResponse:
+                ListNodePoolsResponse is the result
+                of ListNodePoolsRequest.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
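+        # As an illustration (a sketch; resource names are hypothetical
+        # placeholders), a caller may pass the flattened field:
+        #
+        #   client.list_node_pools(
+        #       parent="projects/my-project/locations/us-central1-a/clusters/my-cluster")
+        #
+        # or an explicit request object, but never both at once:
+        #
+        #   client.list_node_pools(request=cluster_service.ListNodePoolsRequest(
+        #       parent="projects/my-project/locations/us-central1-a/clusters/my-cluster"))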
+ has_flattened_params = any([project_id, zone, cluster_id, parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListNodePoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListNodePoolsRequest): + request = cluster_service.ListNodePoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_node_pools] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_node_pool(self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (google.container_v1.types.GetNodePoolRequest): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (str): + Deprecated. The name of the node + pool. This field has been deprecated and + replaced by the name field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster, node pool id) of + the node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.NodePool:
+                NodePool contains the name and
+                configuration for a cluster's node pool.
+                Node pools are a set of nodes (i.e.,
+                VMs), with a common configuration and
+                specification, under the control of the
+                cluster master. They may have a set of
+                Kubernetes labels applied to them, which
+                may be used to reference them during pod
+                scheduling. They may also be resized up
+                or down, to accommodate the workload.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cluster_service.GetNodePoolRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, cluster_service.GetNodePoolRequest):
+            request = cluster_service.GetNodePoolRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if project_id is not None:
+                request.project_id = project_id
+            if zone is not None:
+                request.zone = zone
+            if cluster_id is not None:
+                request.cluster_id = cluster_id
+            if node_pool_id is not None:
+                request.node_pool_id = node_pool_id
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.get_node_pool]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def create_node_pool(self,
+            request: cluster_service.CreateNodePoolRequest = None,
+            *,
+            project_id: str = None,
+            zone: str = None,
+            cluster_id: str = None,
+            node_pool: cluster_service.NodePool = None,
+            parent: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Creates a node pool for a cluster.
+
+        Args:
+            request (google.container_v1.types.CreateNodePoolRequest):
+                The request object. CreateNodePoolRequest creates a node
+                pool for a cluster.
+            project_id (str):
+                Deprecated. The Google Developers Console `project ID or
+                project
+                number `__.
+                This field has been deprecated and replaced by the
+                parent field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (str):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the parent field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (str):
+                Deprecated. The name of the cluster.
+ This field has been deprecated and + replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (google.container_v1.types.NodePool): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + parent (str): + The parent (project, location, cluster id) where the + node pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateNodePoolRequest): + request = cluster_service.CreateNodePoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_node_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_node_pool(self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (google.container_v1.types.DeleteNodePoolRequest): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. 
+ This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (str): + Deprecated. The name of the node pool + to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster, node pool id) of + the node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.DeleteNodePoolRequest): + request = cluster_service.DeleteNodePoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_node_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
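+        # The Operation returned here describes a long-running, server-side
+        # action; a caller can poll it via get_operation() until it reaches
+        # DONE. A sketch, with hypothetical resource names (production code
+        # would back off between polls):
+        #
+        #   op = client.delete_node_pool(name=pool_name)
+        #   while op.status != cluster_service.Operation.Status.DONE:
+        #       op = client.get_operation(
+        #           name=f"projects/my-project/locations/us-central1-a/operations/{op.name}")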
+        return response
+
+    def rollback_node_pool_upgrade(self,
+            request: cluster_service.RollbackNodePoolUpgradeRequest = None,
+            *,
+            project_id: str = None,
+            zone: str = None,
+            cluster_id: str = None,
+            node_pool_id: str = None,
+            name: str = None,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Rolls back a previously Aborted or Failed NodePool
+        upgrade. This makes no changes if the last upgrade
+        successfully completed.
+
+        Args:
+            request (google.container_v1.types.RollbackNodePoolUpgradeRequest):
+                The request object. RollbackNodePoolUpgradeRequest
+                rolls back the previously Aborted or Failed NodePool
+                upgrade. This will be a no-op if the last upgrade
+                successfully completed.
+            project_id (str):
+                Deprecated. The Google Developers Console `project ID or
+                project
+                number `__.
+                This field has been deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``project_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (str):
+                Deprecated. The name of the Google Compute Engine
+                `zone `__
+                in which the cluster resides. This field has been
+                deprecated and replaced by the name field.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (str):
+                Deprecated. The name of the cluster
+                to roll back. This field has been
+                deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            node_pool_id (str):
+                Deprecated. The name of the node pool
+                to roll back. This field has been
+                deprecated and replaced by the name
+                field.
+
+                This corresponds to the ``node_pool_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            name (str):
+                The name (project, location, cluster, node pool id) of
+                the node pool whose upgrade is to be rolled back.
+                Specified in the format
+                ``projects/*/locations/*/clusters/*/nodePools/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cluster_service.RollbackNodePoolUpgradeRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
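+        # Note that proto-plus request constructors also accept a mapping, so
+        # (a sketch; the resource name is a hypothetical placeholder) a caller
+        # may equivalently write:
+        #
+        #   client.rollback_node_pool_upgrade(request={
+        #       "name": "projects/my-project/locations/us-central1-a"
+        #               "/clusters/my-cluster/nodePools/my-pool",
+        #   })
+        #
+        # and the mapping is coerced into a typed request object below.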
+ if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest): + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.rollback_node_pool_upgrade] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_node_pool_management(self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (google.container_v1.types.SetNodePoolManagementRequest): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolManagementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolManagementRequest): + request = cluster_service.SetNodePoolManagementRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: cluster_service.SetLabelsRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (google.container_v1.types.SetLabelsRequest): + The request object. 
SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLabelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLabelsRequest): + request = cluster_service.SetLabelsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_legacy_abac(self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (google.container_v1.types.SetLegacyAbacRequest): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster + to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (bool): + Required. Whether ABAC authorization + will be enabled in the cluster. + + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster id) of the cluster + to set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, enabled, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLegacyAbacRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLegacyAbacRequest): + request = cluster_service.SetLegacyAbacRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def start_ip_rotation(self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (google.container_v1.types.StartIPRotationRequest): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. 
+ + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster id) of the cluster + to start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.StartIPRotationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.StartIPRotationRequest): + request = cluster_service.StartIPRotationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def complete_ip_rotation(self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (google.container_v1.types.CompleteIPRotationRequest): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. 
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster_id (str):
+                Deprecated. The name of the cluster.
+                This field has been deprecated and
+                replaced by the name field.
+
+                This corresponds to the ``cluster_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            name (str):
+                The name (project, location, cluster id) of the cluster
+                to complete IP rotation. Specified in the format
+                ``projects/*/locations/*/clusters/*``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, name])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a cluster_service.CompleteIPRotationRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, cluster_service.CompleteIPRotationRequest):
+            request = cluster_service.CompleteIPRotationRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if project_id is not None:
+                request.project_id = project_id
+            if zone is not None:
+                request.zone = zone
+            if cluster_id is not None:
+                request.cluster_id = cluster_id
+            if name is not None:
+                request.name = name
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def set_node_pool_size(self,
+            request: cluster_service.SetNodePoolSizeRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Sets the size for a specific node pool.
+
+        Args:
+            request (google.container_v1.types.SetNodePoolSizeRequest):
+                The request object. SetNodePoolSizeRequest sets the size
+                of a node pool.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
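+
+        Example:
+            A minimal sketch of resizing a pool to three nodes; the resource
+            name below is a hypothetical placeholder::
+
+                op = client.set_node_pool_size(
+                    request=cluster_service.SetNodePoolSizeRequest(
+                        name="projects/my-project/locations/us-central1-a"
+                             "/clusters/my-cluster/nodePools/my-pool",
+                        node_count=3,
+                    )
+                )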
+ + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolSizeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolSizeRequest): + request = cluster_service.SetNodePoolSizeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_network_policy(self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (google.container_v1.types.SetNetworkPolicyRequest): + The request object. SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and + replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (google.container_v1.types.NetworkPolicy): + Required. Configuration options for + the NetworkPolicy feature. + + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + name (str): + The name (project, location, cluster id) of the cluster + to set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
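+
+        Example:
+            A minimal sketch that enables the Calico provider; the cluster
+            name below is a hypothetical placeholder::
+
+                op = client.set_network_policy(
+                    name="projects/my-project/locations/us-central1-a/clusters/my-cluster",
+                    network_policy=cluster_service.NetworkPolicy(
+                        provider=cluster_service.NetworkPolicy.Provider.CALICO,
+                        enabled=True,
+                    ),
+                )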
+ + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, network_policy, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNetworkPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNetworkPolicyRequest): + request = cluster_service.SetNetworkPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_network_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_maintenance_policy(self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + name: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (google.container_v1.types.SetMaintenancePolicyRequest): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (str): + Required. The Google Developers Console `project ID or + project + number `__. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. The name of the cluster to + update. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (google.container_v1.types.MaintenancePolicy): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ name (str): + The name (project, location, cluster id) of the cluster + to set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy, name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMaintenancePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMaintenancePolicyRequest): + request = cluster_service.SetMaintenancePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_usable_subnetworks(self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksPager: + r"""Lists subnetworks that are usable for creating + clusters in a project. + + Args: + request (google.container_v1.types.ListUsableSubnetworksRequest): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks available to a + user for creating clusters. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1.services.cluster_manager.pagers.ListUsableSubnetworksPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. 
+ + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListUsableSubnetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListUsableSubnetworksRequest): + request = cluster_service.ListUsableSubnetworksRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListUsableSubnetworksPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-container", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterManagerClient", +) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py new file mode 100644 index 00000000..b8360b94 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.container_v1.types import cluster_service + + +class ListUsableSubnetworksPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`google.container_v1.types.ListUsableSubnetworksResponse` object, and + provides an ``__iter__`` method to iterate through its + ``subnetworks`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`google.container_v1.types.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., cluster_service.ListUsableSubnetworksResponse], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.container_v1.types.ListUsableSubnetworksRequest): + The initial request object. + response (google.container_v1.types.ListUsableSubnetworksResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[cluster_service.UsableSubnetwork]: + for page in self.pages: + yield from page.subnetworks + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListUsableSubnetworksAsyncPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`google.container_v1.types.ListUsableSubnetworksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``subnetworks`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`google.container_v1.types.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[cluster_service.ListUsableSubnetworksResponse]], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.container_v1.types.ListUsableSubnetworksRequest): + The initial request object. + response (google.container_v1.types.ListUsableSubnetworksResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[cluster_service.UsableSubnetwork]: + async def async_generator(): + async for page in self.pages: + for response in page.subnetworks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py new file mode 100644 index 00000000..32ea8716 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterManagerTransport +from .grpc import ClusterManagerGrpcTransport +from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] +_transport_registry['grpc'] = ClusterManagerGrpcTransport +_transport_registry['grpc_asyncio'] = ClusterManagerGrpcAsyncIOTransport + +__all__ = ( + 'ClusterManagerTransport', + 'ClusterManagerGrpcTransport', + 'ClusterManagerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py new file mode 100644 index 00000000..d026d49c --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py @@ -0,0 +1,666 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.container_v1.types import cluster_service +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-container', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class ClusterManagerTransport(abc.ABC): + """Abstract transport class for ClusterManager.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'container.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
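
The credential-resolution hunk below carries this PR's headline change: when ``always_use_jwt_access`` is set and google-auth is new enough, service account credentials are switched to self-signed JWTs, which skips the OAuth token exchange. A standalone sketch of the same check (the helper name is illustrative, not part of the patch):

    from google.oauth2 import service_account

    def maybe_enable_self_signed_jwt(credentials, always_use_jwt_access: bool):
        # Only service account credentials on a google-auth version that
        # exposes with_always_use_jwt_access() get the new behavior.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials, "with_always_use_jwt_access")):
            return credentials.with_always_use_jwt_access(True)
        return credentials
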
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                credentials_file,
+                **scopes_kwargs,
+                quota_project_id=quota_project_id
+            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use a self-signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    # TODO(busunkim): This method is in the base transport
+    # to avoid duplicating code across the transport classes. These functions
+    # should be deleted once the minimum required version of google-auth is increased.
+
+    # TODO: Remove this function once google-auth >= 1.25.0 is required
+    @classmethod
+    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
+        """Returns scopes kwargs to pass to google-auth methods, depending on the google-auth version."""
+
+        scopes_kwargs = {}
+
+        if _GOOGLE_AUTH_VERSION and (
+            packaging.version.parse(_GOOGLE_AUTH_VERSION)
+            >= packaging.version.parse("1.25.0")
+        ):
+            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+        else:
+            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+        return scopes_kwargs
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.list_clusters: gapic_v1.method.wrap_method(
+                self.list_clusters,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        core_exceptions.DeadlineExceeded,
+                        core_exceptions.ServiceUnavailable,
+                    ),
+                    deadline=20.0,
+                ),
+                default_timeout=20.0,
+                client_info=client_info,
+            ),
+            self.get_cluster: gapic_v1.method.wrap_method(
+                self.get_cluster,
+                default_retry=retries.Retry(
+                    initial=0.1,
+                    maximum=60.0,
+                    multiplier=1.3,
+                    predicate=retries.if_exception_type(
+                        core_exceptions.DeadlineExceeded,
+                        core_exceptions.ServiceUnavailable,
+                    ),
+                    deadline=20.0,
+                ),
+                default_timeout=20.0,
+                client_info=client_info,
+            ),
+            self.create_cluster: gapic_v1.method.wrap_method(
+                self.create_cluster,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.update_cluster: gapic_v1.method.wrap_method(
+                self.update_cluster,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.update_node_pool: gapic_v1.method.wrap_method(
+                self.update_node_pool,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.set_node_pool_autoscaling: gapic_v1.method.wrap_method(
+                self.set_node_pool_autoscaling,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.set_logging_service: gapic_v1.method.wrap_method(
+                self.set_logging_service,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.set_monitoring_service: gapic_v1.method.wrap_method(
+                self.set_monitoring_service,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.set_addons_config: gapic_v1.method.wrap_method(
+                self.set_addons_config,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+            self.set_locations: gapic_v1.method.wrap_method(
+                self.set_locations,
+                default_timeout=45.0,
+                client_info=client_info,
+            ),
+
self.update_master: gapic_v1.method.wrap_method( + self.update_master, + default_timeout=45.0, + client_info=client_info, + ), + self.set_master_auth: gapic_v1.method.wrap_method( + self.set_master_auth, + default_timeout=45.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_timeout=45.0, + client_info=client_info, + ), + self.get_server_config: gapic_v1.method.wrap_method( + self.get_server_config, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_json_web_keys: gapic_v1.method.wrap_method( + self.get_json_web_keys, + default_timeout=None, + client_info=client_info, + ), + self.list_node_pools: gapic_v1.method.wrap_method( + self.list_node_pools, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_node_pool: gapic_v1.method.wrap_method( + self.get_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_node_pool: gapic_v1.method.wrap_method( + self.create_node_pool, + default_timeout=45.0, + client_info=client_info, + ), + self.delete_node_pool: gapic_v1.method.wrap_method( + self.delete_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method( + self.rollback_node_pool_upgrade, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_management: gapic_v1.method.wrap_method( + self.set_node_pool_management, + default_timeout=45.0, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=45.0, + client_info=client_info, + ), + self.set_legacy_abac: gapic_v1.method.wrap_method( + 
self.set_legacy_abac, + default_timeout=45.0, + client_info=client_info, + ), + self.start_ip_rotation: gapic_v1.method.wrap_method( + self.start_ip_rotation, + default_timeout=45.0, + client_info=client_info, + ), + self.complete_ip_rotation: gapic_v1.method.wrap_method( + self.complete_ip_rotation, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_size: gapic_v1.method.wrap_method( + self.set_node_pool_size, + default_timeout=45.0, + client_info=client_info, + ), + self.set_network_policy: gapic_v1.method.wrap_method( + self.set_network_policy, + default_timeout=45.0, + client_info=client_info, + ), + self.set_maintenance_policy: gapic_v1.method.wrap_method( + self.set_maintenance_policy, + default_timeout=45.0, + client_info=client_info, + ), + self.list_usable_subnetworks: gapic_v1.method.wrap_method( + self.list_usable_subnetworks, + default_timeout=None, + client_info=client_info, + ), + } + + @property + def list_clusters(self) -> Callable[ + [cluster_service.ListClustersRequest], + Union[ + cluster_service.ListClustersResponse, + Awaitable[cluster_service.ListClustersResponse] + ]]: + raise NotImplementedError() + + @property + def get_cluster(self) -> Callable[ + [cluster_service.GetClusterRequest], + Union[ + cluster_service.Cluster, + Awaitable[cluster_service.Cluster] + ]]: + raise NotImplementedError() + + @property + def create_cluster(self) -> Callable[ + [cluster_service.CreateClusterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def update_cluster(self) -> Callable[ + [cluster_service.UpdateClusterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def update_node_pool(self) -> Callable[ + [cluster_service.UpdateNodePoolRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_node_pool_autoscaling(self) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_logging_service(self) -> Callable[ + [cluster_service.SetLoggingServiceRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_monitoring_service(self) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_addons_config(self) -> Callable[ + [cluster_service.SetAddonsConfigRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_locations(self) -> Callable[ + [cluster_service.SetLocationsRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def update_master(self) -> Callable[ + [cluster_service.UpdateMasterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_master_auth(self) -> Callable[ + [cluster_service.SetMasterAuthRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_cluster(self) -> 
Callable[ + [cluster_service.DeleteClusterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations(self) -> Callable[ + [cluster_service.ListOperationsRequest], + Union[ + cluster_service.ListOperationsResponse, + Awaitable[cluster_service.ListOperationsResponse] + ]]: + raise NotImplementedError() + + @property + def get_operation(self) -> Callable[ + [cluster_service.GetOperationRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_operation(self) -> Callable[ + [cluster_service.CancelOperationRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def get_server_config(self) -> Callable[ + [cluster_service.GetServerConfigRequest], + Union[ + cluster_service.ServerConfig, + Awaitable[cluster_service.ServerConfig] + ]]: + raise NotImplementedError() + + @property + def get_json_web_keys(self) -> Callable[ + [cluster_service.GetJSONWebKeysRequest], + Union[ + cluster_service.GetJSONWebKeysResponse, + Awaitable[cluster_service.GetJSONWebKeysResponse] + ]]: + raise NotImplementedError() + + @property + def list_node_pools(self) -> Callable[ + [cluster_service.ListNodePoolsRequest], + Union[ + cluster_service.ListNodePoolsResponse, + Awaitable[cluster_service.ListNodePoolsResponse] + ]]: + raise NotImplementedError() + + @property + def get_node_pool(self) -> Callable[ + [cluster_service.GetNodePoolRequest], + Union[ + cluster_service.NodePool, + Awaitable[cluster_service.NodePool] + ]]: + raise NotImplementedError() + + @property + def create_node_pool(self) -> Callable[ + [cluster_service.CreateNodePoolRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_node_pool(self) -> Callable[ + [cluster_service.DeleteNodePoolRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def rollback_node_pool_upgrade(self) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_node_pool_management(self) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [cluster_service.SetLabelsRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_legacy_abac(self) -> Callable[ + [cluster_service.SetLegacyAbacRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def start_ip_rotation(self) -> Callable[ + [cluster_service.StartIPRotationRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def complete_ip_rotation(self) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_node_pool_size(self) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], + Union[ + 
cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_network_policy(self) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_maintenance_policy(self) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def list_usable_subnetworks(self) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + Union[ + cluster_service.ListUsableSubnetworksResponse, + Awaitable[cluster_service.ListUsableSubnetworksResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ClusterManagerTransport', +) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py new file mode 100644 index 00000000..f690132c --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py @@ -0,0 +1,1097 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.container_v1.types import cluster_service +from google.protobuf import empty_pb2 # type: ignore +from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO + + +class ClusterManagerGrpcTransport(ClusterManagerTransport): + """gRPC backend transport for ClusterManager. + + Google Kubernetes Engine Cluster Manager v1 + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'container.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Sequence[str] = None, + channel: grpc.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. 
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            channel (Optional[grpc.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
+                be used for service account credentials.
+
+        Raises:
+          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+              creation failed for any reason.
+          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+              and ``credentials_file`` are passed.
+        """
+        self._grpc_channel = None
+        self._ssl_channel_credentials = ssl_channel_credentials
+        self._stubs: Dict[str, Callable] = {}
+
+        if api_mtls_endpoint:
+            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+        if client_cert_source:
+            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+        if channel:
+            # Ignore credentials if a channel was passed.
+            credentials = False
+            # If a channel was explicitly provided, set it.
+            self._grpc_channel = channel
+            self._ssl_channel_credentials = None
+
+        else:
+            if api_mtls_endpoint:
+                host = api_mtls_endpoint
+
+                # Create SSL credentials with client_cert_source or application
+                # default SSL credentials.
+                if client_cert_source:
+                    cert, key = client_cert_source()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+                else:
+                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+            else:
+                if client_cert_source_for_mtls and not ssl_channel_credentials:
+                    cert, key = client_cert_source_for_mtls()
+                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+                        certificate_chain=cert, private_key=key
+                    )
+
+        # The base transport sets the host, credentials and scopes
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            scopes=scopes,
+            quota_project_id=quota_project_id,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+        )
+
+        if not self._grpc_channel:
+            self._grpc_channel = type(self).create_channel(
+                self._host,
+                credentials=self._credentials,
+                credentials_file=credentials_file,
+                scopes=self._scopes,
+                ssl_credentials=self._ssl_channel_credentials,
+                quota_project_id=quota_project_id,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+
+        # Wrap messages. This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
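
For callers that need mutual TLS without the deprecated arguments, ``client_cert_source_for_mtls`` takes a zero-argument callback returning PEM bytes. A hedged sketch, not part of the patch; the file paths are placeholders:

    from google.container_v1.services.cluster_manager.transports import (
        ClusterManagerGrpcTransport,
    )

    def read_cert_and_key():
        # Return (certificate_chain, private_key), both PEM-encoded bytes.
        with open("client_cert.pem", "rb") as f:  # placeholder path
            cert = f.read()
        with open("client_key.pem", "rb") as f:   # placeholder path
            key = f.read()
        return cert, key

    transport = ClusterManagerGrpcTransport(
        client_cert_source_for_mtls=read_cert_and_key,
    )
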
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListClusters', + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def get_cluster(self) -> Callable[ + [cluster_service.GetClusterRequest], + cluster_service.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details of a specific cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetCluster', + request_serializer=cluster_service.GetClusterRequest.serialize, + response_deserializer=cluster_service.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def create_cluster(self) -> Callable[ + [cluster_service.CreateClusterRequest], + cluster_service.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CreateCluster', + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [cluster_service.UpdateClusterRequest], + cluster_service.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings of a specific cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/UpdateCluster', + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_cluster'] + + @property + def update_node_pool(self) -> Callable[ + [cluster_service.UpdateNodePoolRequest], + cluster_service.Operation]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type for the + specified node pool. + + Returns: + Callable[[~.UpdateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_node_pool' not in self._stubs: + self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/UpdateNodePool', + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_node_pool'] + + @property + def set_node_pool_autoscaling(self) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + cluster_service.Operation]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings for the specified node + pool. + + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_autoscaling' not in self._stubs: + self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNodePoolAutoscaling', + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_autoscaling'] + + @property + def set_logging_service(self) -> Callable[ + [cluster_service.SetLoggingServiceRequest], + cluster_service.Operation]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. + + Returns: + Callable[[~.SetLoggingServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_logging_service' not in self._stubs: + self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLoggingService', + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_logging_service'] + + @property + def set_monitoring_service(self) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + cluster_service.Operation]: + r"""Return a callable for the set monitoring service method over gRPC. + + Sets the monitoring service for a specific cluster. 
+
+        Returns:
+            Callable[[~.SetMonitoringServiceRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_monitoring_service' not in self._stubs:
+            self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/SetMonitoringService',
+                request_serializer=cluster_service.SetMonitoringServiceRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_monitoring_service']
+
+    @property
+    def set_addons_config(self) -> Callable[
+            [cluster_service.SetAddonsConfigRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the set addons config method over gRPC.
+
+        Sets the addons for a specific cluster.
+
+        Returns:
+            Callable[[~.SetAddonsConfigRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_addons_config' not in self._stubs:
+            self._stubs['set_addons_config'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/SetAddonsConfig',
+                request_serializer=cluster_service.SetAddonsConfigRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_addons_config']
+
+    @property
+    def set_locations(self) -> Callable[
+            [cluster_service.SetLocationsRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the set locations method over gRPC.
+
+        Sets the locations for a specific cluster. Deprecated. Use
+        `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update>`__
+        instead.
+
+        Returns:
+            Callable[[~.SetLocationsRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_locations' not in self._stubs:
+            self._stubs['set_locations'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/SetLocations',
+                request_serializer=cluster_service.SetLocationsRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_locations']
+
+    @property
+    def update_master(self) -> Callable[
+            [cluster_service.UpdateMasterRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the update master method over gRPC.
+
+        Updates the master for a specific cluster.
+
+        Returns:
+            Callable[[~.UpdateMasterRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_master' not in self._stubs:
+            self._stubs['update_master'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/UpdateMaster',
+                request_serializer=cluster_service.UpdateMasterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['update_master']
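
Since SetLocations above is deprecated in favor of UpdateCluster, the replacement call looks roughly like this (a hedged sketch; the resource names are placeholders, and desired_locations is assumed to be the ClusterUpdate field that supersedes it):

    from google.container_v1 import ClusterManagerClient
    from google.container_v1.types import ClusterUpdate

    client = ClusterManagerClient()
    operation = client.update_cluster(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/my-cluster",
            "update": ClusterUpdate(
                desired_locations=["us-central1-a", "us-central1-b"],
            ),
        }
    )
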
+
+    @property
+    def set_master_auth(self) -> Callable[
+            [cluster_service.SetMasterAuthRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the set master auth method over gRPC.
+
+        Sets master auth materials. Currently supports
+        changing the admin password of a specific cluster,
+        either via password generation or explicitly setting
+        the password.
+
+        Returns:
+            Callable[[~.SetMasterAuthRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_master_auth' not in self._stubs:
+            self._stubs['set_master_auth'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/SetMasterAuth',
+                request_serializer=cluster_service.SetMasterAuthRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_master_auth']
+
+    @property
+    def delete_cluster(self) -> Callable[
+            [cluster_service.DeleteClusterRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the delete cluster method over gRPC.
+
+        Deletes the cluster, including the Kubernetes
+        endpoint and all worker nodes.
+
+        Firewalls and routes that were configured during cluster
+        creation are also deleted.
+
+        Other Google Compute Engine resources that might be in
+        use by the cluster, such as load balancer resources, are
+        not deleted if they weren't present when the cluster was
+        initially created.
+
+        Returns:
+            Callable[[~.DeleteClusterRequest],
+                    ~.Operation]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_cluster' not in self._stubs:
+            self._stubs['delete_cluster'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/DeleteCluster',
+                request_serializer=cluster_service.DeleteClusterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['delete_cluster']
+
+    @property
+    def list_operations(self) -> Callable[
+            [cluster_service.ListOperationsRequest],
+            cluster_service.ListOperationsResponse]:
+        r"""Return a callable for the list operations method over gRPC.
+
+        Lists all operations in a project in a specific zone
+        or all zones.
+
+        Returns:
+            Callable[[~.ListOperationsRequest],
+                    ~.ListOperationsResponse]:
+            A function that, when called, will call the underlying RPC
+            on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
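
Mutating RPCs in this service return a cluster_service.Operation rather than the finished resource; callers poll GetOperation until it reports DONE. A hedged polling sketch, assuming a constructed client (the helper and names are illustrative):

    import time

    from google.container_v1.types import Operation

    def wait_for_operation(client, operation_name: str, poll_interval: float = 5.0):
        # Re-fetch the operation until the server marks it DONE.
        while True:
            op = client.get_operation(request={"name": operation_name})
            if op.status == Operation.Status.DONE:
                return op
            time.sleep(poll_interval)
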
+ if 'list_operations' not in self._stubs: + self._stubs['list_operations'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListOperations', + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs['list_operations'] + + @property + def get_operation(self) -> Callable[ + [cluster_service.GetOperationRequest], + cluster_service.Operation]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_operation' not in self._stubs: + self._stubs['get_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetOperation', + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['get_operation'] + + @property + def cancel_operation(self) -> Callable[ + [cluster_service.CancelOperationRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_operation' not in self._stubs: + self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CancelOperation', + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_operation'] + + @property + def get_server_config(self) -> Callable[ + [cluster_service.GetServerConfigRequest], + cluster_service.ServerConfig]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + ~.ServerConfig]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_server_config' not in self._stubs: + self._stubs['get_server_config'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetServerConfig', + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs['get_server_config'] + + @property + def get_json_web_keys(self) -> Callable[ + [cluster_service.GetJSONWebKeysRequest], + cluster_service.GetJSONWebKeysResponse]: + r"""Return a callable for the get json web keys method over gRPC. + + Gets the public component of the cluster signing keys + in JSON Web Key format. + This API is not yet intended for general use, and is not + available for all clusters. 
+ + Returns: + Callable[[~.GetJSONWebKeysRequest], + ~.GetJSONWebKeysResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_json_web_keys' not in self._stubs: + self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetJSONWebKeys', + request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, + response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, + ) + return self._stubs['get_json_web_keys'] + + @property + def list_node_pools(self) -> Callable[ + [cluster_service.ListNodePoolsRequest], + cluster_service.ListNodePoolsResponse]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + ~.ListNodePoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_node_pools' not in self._stubs: + self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListNodePools', + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs['list_node_pools'] + + @property + def get_node_pool(self) -> Callable[ + [cluster_service.GetNodePoolRequest], + cluster_service.NodePool]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + ~.NodePool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_node_pool' not in self._stubs: + self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetNodePool', + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs['get_node_pool'] + + @property + def create_node_pool(self) -> Callable[ + [cluster_service.CreateNodePoolRequest], + cluster_service.Operation]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
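
ListNodePools is a plain, unpaged list call: the response carries the pools directly. A minimal usage sketch, not part of the patch; the resource name is a placeholder:

    from google.container_v1 import ClusterManagerClient

    client = ClusterManagerClient()
    response = client.list_node_pools(
        request={"parent": "projects/my-project/locations/us-central1-a/clusters/my-cluster"}
    )
    for node_pool in response.node_pools:
        print(node_pool.name, node_pool.initial_node_count)
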
+ if 'create_node_pool' not in self._stubs: + self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CreateNodePool', + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_node_pool'] + + @property + def delete_node_pool(self) -> Callable[ + [cluster_service.DeleteNodePoolRequest], + cluster_service.Operation]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_node_pool' not in self._stubs: + self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/DeleteNodePool', + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['delete_node_pool'] + + @property + def rollback_node_pool_upgrade(self) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + cluster_service.Operation]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'rollback_node_pool_upgrade' not in self._stubs: + self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/RollbackNodePoolUpgrade', + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['rollback_node_pool_upgrade'] + + @property + def set_node_pool_management(self) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + cluster_service.Operation]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_management' not in self._stubs: + self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNodePoolManagement', + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_management'] + + @property + def set_labels(self) -> Callable[ + [cluster_service.SetLabelsRequest], + cluster_service.Operation]: + r"""Return a callable for the set labels method over gRPC. 
+ + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_labels' not in self._stubs: + self._stubs['set_labels'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLabels', + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_labels'] + + @property + def set_legacy_abac(self) -> Callable[ + [cluster_service.SetLegacyAbacRequest], + cluster_service.Operation]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_legacy_abac' not in self._stubs: + self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLegacyAbac', + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_legacy_abac'] + + @property + def start_ip_rotation(self) -> Callable[ + [cluster_service.StartIPRotationRequest], + cluster_service.Operation]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'start_ip_rotation' not in self._stubs: + self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/StartIPRotation', + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['start_ip_rotation'] + + @property + def complete_ip_rotation(self) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + cluster_service.Operation]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
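+        # Usage sketch (illustrative, not generator output): IP rotation is a
+        # two-step flow over this transport, with hypothetical request objects
+        # ``start_req`` and ``done_req``:
+        #
+        #   transport.start_ip_rotation(start_req)    # master serves old and new IPs
+        #   transport.complete_ip_rotation(done_req)  # drop the old IP when done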
+ if 'complete_ip_rotation' not in self._stubs: + self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CompleteIPRotation', + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['complete_ip_rotation'] + + @property + def set_node_pool_size(self) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], + cluster_service.Operation]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_size' not in self._stubs: + self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNodePoolSize', + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_size'] + + @property + def set_network_policy(self) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], + cluster_service.Operation]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_network_policy' not in self._stubs: + self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNetworkPolicy', + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_network_policy'] + + @property + def set_maintenance_policy(self) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + cluster_service.Operation]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_maintenance_policy' not in self._stubs: + self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetMaintenancePolicy', + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_maintenance_policy'] + + @property + def list_usable_subnetworks(self) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + cluster_service.ListUsableSubnetworksResponse]: + r"""Return a callable for the list usable subnetworks method over gRPC. 
+ + Lists subnetworks that are usable for creating + clusters in a project. + + Returns: + Callable[[~.ListUsableSubnetworksRequest], + ~.ListUsableSubnetworksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_usable_subnetworks' not in self._stubs: + self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListUsableSubnetworks', + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs['list_usable_subnetworks'] + + +__all__ = ( + 'ClusterManagerGrpcTransport', +) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py new file mode 100644 index 00000000..bff48325 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py @@ -0,0 +1,1101 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 # type: ignore +from google.api_core import grpc_helpers_async # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +import packaging.version + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.container_v1.types import cluster_service +from google.protobuf import empty_pb2 # type: ignore +from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO +from .grpc import ClusterManagerGrpcTransport + + +class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport): + """gRPC AsyncIO backend transport for ClusterManager. + + Google Kubernetes Engine Cluster Manager v1 + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'container.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. 
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'container.googleapis.com',
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: aio.Channel = None,
+            api_mtls_endpoint: str = None,
+            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+            ssl_channel_credentials: grpc.ChannelCredentials = None,
+            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+            quota_project_id=None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure mutual TLS channel.
It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_clusters(self) -> Callable[ + [cluster_service.ListClustersRequest], + Awaitable[cluster_service.ListClustersResponse]]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
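+
+        Example (a minimal sketch, not generated output; the project ID is a
+        hypothetical placeholder and Application Default Credentials are
+        assumed to be configured)::
+
+            transport = ClusterManagerGrpcAsyncIOTransport()
+            request = cluster_service.ListClustersRequest(
+                parent="projects/my-project/locations/-",
+            )
+            response = await transport.list_clusters(request)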
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListClusters', + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def get_cluster(self) -> Callable[ + [cluster_service.GetClusterRequest], + Awaitable[cluster_service.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details of a specific cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetCluster', + request_serializer=cluster_service.GetClusterRequest.serialize, + response_deserializer=cluster_service.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def create_cluster(self) -> Callable[ + [cluster_service.CreateClusterRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CreateCluster', + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [cluster_service.UpdateClusterRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings of a specific cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
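+        # Usage sketch (illustrative, not generator output; ``update_req`` is a
+        # hypothetical ``cluster_service.UpdateClusterRequest``): mutations like
+        # this resolve to a ``cluster_service.Operation``, which can then be
+        # polled via the ``get_operation`` callable until it completes:
+        #
+        #   op = await transport.update_cluster(update_req)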
+ if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/UpdateCluster', + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_cluster'] + + @property + def update_node_pool(self) -> Callable[ + [cluster_service.UpdateNodePoolRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type for the + specified node pool. + + Returns: + Callable[[~.UpdateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_node_pool' not in self._stubs: + self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/UpdateNodePool', + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_node_pool'] + + @property + def set_node_pool_autoscaling(self) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings for the specified node + pool. + + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_autoscaling' not in self._stubs: + self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNodePoolAutoscaling', + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_autoscaling'] + + @property + def set_logging_service(self) -> Callable[ + [cluster_service.SetLoggingServiceRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. + + Returns: + Callable[[~.SetLoggingServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'set_logging_service' not in self._stubs: + self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLoggingService', + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_logging_service'] + + @property + def set_monitoring_service(self) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set monitoring service method over gRPC. + + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_monitoring_service' not in self._stubs: + self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetMonitoringService', + request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_monitoring_service'] + + @property + def set_addons_config(self) -> Callable[ + [cluster_service.SetAddonsConfigRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set addons config method over gRPC. + + Sets the addons for a specific cluster. + + Returns: + Callable[[~.SetAddonsConfigRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_addons_config' not in self._stubs: + self._stubs['set_addons_config'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetAddonsConfig', + request_serializer=cluster_service.SetAddonsConfigRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_addons_config'] + + @property + def set_locations(self) -> Callable[ + [cluster_service.SetLocationsRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set locations method over gRPC. + + Sets the locations for a specific cluster. Deprecated. Use + `projects.locations.clusters.update `__ + instead. + + Returns: + Callable[[~.SetLocationsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_locations' not in self._stubs: + self._stubs['set_locations'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLocations', + request_serializer=cluster_service.SetLocationsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_locations'] + + @property + def update_master(self) -> Callable[ + [cluster_service.UpdateMasterRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the update master method over gRPC. 
+
+        Updates the master for a specific cluster.
+
+        Returns:
+            Callable[[~.UpdateMasterRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_master' not in self._stubs:
+            self._stubs['update_master'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/UpdateMaster',
+                request_serializer=cluster_service.UpdateMasterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['update_master']
+
+    @property
+    def set_master_auth(self) -> Callable[
+            [cluster_service.SetMasterAuthRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the set master auth method over gRPC.
+
+        Sets master auth materials. Currently supports
+        changing the admin password of a specific cluster,
+        either via password generation or explicitly setting the
+        password.
+
+        Returns:
+            Callable[[~.SetMasterAuthRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_master_auth' not in self._stubs:
+            self._stubs['set_master_auth'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/SetMasterAuth',
+                request_serializer=cluster_service.SetMasterAuthRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_master_auth']
+
+    @property
+    def delete_cluster(self) -> Callable[
+            [cluster_service.DeleteClusterRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the delete cluster method over gRPC.
+
+        Deletes the cluster, including the Kubernetes
+        endpoint and all worker nodes.
+
+        Firewalls and routes that were configured during cluster
+        creation are also deleted.
+
+        Other Google Compute Engine resources that might be in
+        use by the cluster, such as load balancer resources, are
+        not deleted if they weren't present when the cluster was
+        initially created.
+
+        Returns:
+            Callable[[~.DeleteClusterRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_cluster' not in self._stubs:
+            self._stubs['delete_cluster'] = self.grpc_channel.unary_unary(
+                '/google.container.v1.ClusterManager/DeleteCluster',
+                request_serializer=cluster_service.DeleteClusterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['delete_cluster']
+
+    @property
+    def list_operations(self) -> Callable[
+            [cluster_service.ListOperationsRequest],
+            Awaitable[cluster_service.ListOperationsResponse]]:
+        r"""Return a callable for the list operations method over gRPC.
+
+        Lists all operations in a project in a specific zone
+        or all zones.
+
+        Returns:
+            Callable[[~.ListOperationsRequest],
+                    Awaitable[~.ListOperationsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
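+
+        Example (a minimal sketch, not generated output; field values are
+        hypothetical)::
+
+            request = cluster_service.ListOperationsRequest(
+                parent="projects/my-project/locations/-",
+            )
+            response = await transport.list_operations(request)
+            for operation in response.operations:
+                print(operation.name, operation.status)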
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_operations' not in self._stubs: + self._stubs['list_operations'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListOperations', + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs['list_operations'] + + @property + def get_operation(self) -> Callable[ + [cluster_service.GetOperationRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_operation' not in self._stubs: + self._stubs['get_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetOperation', + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['get_operation'] + + @property + def cancel_operation(self) -> Callable[ + [cluster_service.CancelOperationRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_operation' not in self._stubs: + self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CancelOperation', + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_operation'] + + @property + def get_server_config(self) -> Callable[ + [cluster_service.GetServerConfigRequest], + Awaitable[cluster_service.ServerConfig]]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + Awaitable[~.ServerConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_server_config' not in self._stubs: + self._stubs['get_server_config'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetServerConfig', + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs['get_server_config'] + + @property + def get_json_web_keys(self) -> Callable[ + [cluster_service.GetJSONWebKeysRequest], + Awaitable[cluster_service.GetJSONWebKeysResponse]]: + r"""Return a callable for the get json web keys method over gRPC. + + Gets the public component of the cluster signing keys + in JSON Web Key format. + This API is not yet intended for general use, and is not + available for all clusters. + + Returns: + Callable[[~.GetJSONWebKeysRequest], + Awaitable[~.GetJSONWebKeysResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_json_web_keys' not in self._stubs: + self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetJSONWebKeys', + request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, + response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, + ) + return self._stubs['get_json_web_keys'] + + @property + def list_node_pools(self) -> Callable[ + [cluster_service.ListNodePoolsRequest], + Awaitable[cluster_service.ListNodePoolsResponse]]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + Awaitable[~.ListNodePoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_node_pools' not in self._stubs: + self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListNodePools', + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs['list_node_pools'] + + @property + def get_node_pool(self) -> Callable[ + [cluster_service.GetNodePoolRequest], + Awaitable[cluster_service.NodePool]]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + Awaitable[~.NodePool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_node_pool' not in self._stubs: + self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/GetNodePool', + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs['get_node_pool'] + + @property + def create_node_pool(self) -> Callable[ + [cluster_service.CreateNodePoolRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_node_pool' not in self._stubs: + self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CreateNodePool', + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_node_pool'] + + @property + def delete_node_pool(self) -> Callable[ + [cluster_service.DeleteNodePoolRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_node_pool' not in self._stubs: + self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/DeleteNodePool', + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['delete_node_pool'] + + @property + def rollback_node_pool_upgrade(self) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
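+        # Usage sketch (illustrative, not generator output; ``request`` is a
+        # hypothetical ``cluster_service.RollbackNodePoolUpgradeRequest``): on
+        # the AsyncIO transport the returned callable yields an awaitable:
+        #
+        #   operation = await transport.rollback_node_pool_upgrade(request)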
+ if 'rollback_node_pool_upgrade' not in self._stubs: + self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/RollbackNodePoolUpgrade', + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['rollback_node_pool_upgrade'] + + @property + def set_node_pool_management(self) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_management' not in self._stubs: + self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNodePoolManagement', + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_management'] + + @property + def set_labels(self) -> Callable[ + [cluster_service.SetLabelsRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_labels' not in self._stubs: + self._stubs['set_labels'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLabels', + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_labels'] + + @property + def set_legacy_abac(self) -> Callable[ + [cluster_service.SetLegacyAbacRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_legacy_abac' not in self._stubs: + self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetLegacyAbac', + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_legacy_abac'] + + @property + def start_ip_rotation(self) -> Callable[ + [cluster_service.StartIPRotationRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. 
+ + Returns: + Callable[[~.StartIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'start_ip_rotation' not in self._stubs: + self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/StartIPRotation', + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['start_ip_rotation'] + + @property + def complete_ip_rotation(self) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_ip_rotation' not in self._stubs: + self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/CompleteIPRotation', + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['complete_ip_rotation'] + + @property + def set_node_pool_size(self) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_size' not in self._stubs: + self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNodePoolSize', + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_size'] + + @property + def set_network_policy(self) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'set_network_policy' not in self._stubs: + self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetNetworkPolicy', + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_network_policy'] + + @property + def set_maintenance_policy(self) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_maintenance_policy' not in self._stubs: + self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/SetMaintenancePolicy', + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_maintenance_policy'] + + @property + def list_usable_subnetworks(self) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + Awaitable[cluster_service.ListUsableSubnetworksResponse]]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that are usable for creating + clusters in a project. + + Returns: + Callable[[~.ListUsableSubnetworksRequest], + Awaitable[~.ListUsableSubnetworksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_usable_subnetworks' not in self._stubs: + self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( + '/google.container.v1.ClusterManager/ListUsableSubnetworks', + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs['list_usable_subnetworks'] + + +__all__ = ( + 'ClusterManagerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1/google/container_v1/types/__init__.py b/owl-bot-staging/v1/google/container_v1/types/__init__.py new file mode 100644 index 00000000..fe1f93fb --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/types/__init__.py @@ -0,0 +1,210 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .cluster_service import ( + AcceleratorConfig, + AddonsConfig, + AuthenticatorGroupsConfig, + AutoprovisioningNodePoolDefaults, + AutoUpgradeOptions, + BinaryAuthorization, + CancelOperationRequest, + ClientCertificateConfig, + CloudRunConfig, + Cluster, + ClusterAutoscaling, + ClusterUpdate, + CompleteIPRotationRequest, + ConfigConnectorConfig, + CreateClusterRequest, + CreateNodePoolRequest, + DailyMaintenanceWindow, + DatabaseEncryption, + DefaultSnatStatus, + DeleteClusterRequest, + DeleteNodePoolRequest, + DnsCacheConfig, + GetClusterRequest, + GetJSONWebKeysRequest, + GetJSONWebKeysResponse, + GetNodePoolRequest, + GetOpenIDConfigRequest, + GetOpenIDConfigResponse, + GetOperationRequest, + GetServerConfigRequest, + HorizontalPodAutoscaling, + HttpLoadBalancing, + IntraNodeVisibilityConfig, + IPAllocationPolicy, + Jwk, + KubernetesDashboard, + LegacyAbac, + ListClustersRequest, + ListClustersResponse, + ListNodePoolsRequest, + ListNodePoolsResponse, + ListOperationsRequest, + ListOperationsResponse, + ListUsableSubnetworksRequest, + ListUsableSubnetworksResponse, + MaintenancePolicy, + MaintenanceWindow, + MasterAuth, + MasterAuthorizedNetworksConfig, + MaxPodsConstraint, + NetworkConfig, + NetworkPolicy, + NetworkPolicyConfig, + NodeConfig, + NodeManagement, + NodePool, + NodePoolAutoscaling, + NodeTaint, + Operation, + OperationProgress, + PrivateClusterConfig, + PrivateClusterMasterGlobalAccessConfig, + RecurringTimeWindow, + ReleaseChannel, + ReservationAffinity, + ResourceLimit, + ResourceUsageExportConfig, + RollbackNodePoolUpgradeRequest, + SandboxConfig, + ServerConfig, + SetAddonsConfigRequest, + SetLabelsRequest, + SetLegacyAbacRequest, + SetLocationsRequest, + SetLoggingServiceRequest, + SetMaintenancePolicyRequest, + SetMasterAuthRequest, + SetMonitoringServiceRequest, + SetNetworkPolicyRequest, + SetNodePoolAutoscalingRequest, + SetNodePoolManagementRequest, + SetNodePoolSizeRequest, + ShieldedInstanceConfig, + ShieldedNodes, + StartIPRotationRequest, + StatusCondition, + TimeWindow, + UpdateClusterRequest, + UpdateMasterRequest, + UpdateNodePoolRequest, + UsableSubnetwork, + UsableSubnetworkSecondaryRange, + VerticalPodAutoscaling, + WorkloadIdentityConfig, + WorkloadMetadataConfig, +) + +__all__ = ( + 'AcceleratorConfig', + 'AddonsConfig', + 'AuthenticatorGroupsConfig', + 'AutoprovisioningNodePoolDefaults', + 'AutoUpgradeOptions', + 'BinaryAuthorization', + 'CancelOperationRequest', + 'ClientCertificateConfig', + 'CloudRunConfig', + 'Cluster', + 'ClusterAutoscaling', + 'ClusterUpdate', + 'CompleteIPRotationRequest', + 'ConfigConnectorConfig', + 'CreateClusterRequest', + 'CreateNodePoolRequest', + 'DailyMaintenanceWindow', + 'DatabaseEncryption', + 'DefaultSnatStatus', + 'DeleteClusterRequest', + 'DeleteNodePoolRequest', + 'DnsCacheConfig', + 'GetClusterRequest', + 'GetJSONWebKeysRequest', + 'GetJSONWebKeysResponse', + 'GetNodePoolRequest', + 'GetOpenIDConfigRequest', + 'GetOpenIDConfigResponse', + 'GetOperationRequest', + 'GetServerConfigRequest', + 'HorizontalPodAutoscaling', + 'HttpLoadBalancing', + 'IntraNodeVisibilityConfig', + 'IPAllocationPolicy', + 'Jwk', + 'KubernetesDashboard', + 'LegacyAbac', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListNodePoolsRequest', + 'ListNodePoolsResponse', + 'ListOperationsRequest', + 'ListOperationsResponse', + 'ListUsableSubnetworksRequest', + 'ListUsableSubnetworksResponse', + 'MaintenancePolicy', + 'MaintenanceWindow', + 'MasterAuth', + 'MasterAuthorizedNetworksConfig', + 'MaxPodsConstraint', + 
'NetworkConfig', + 'NetworkPolicy', + 'NetworkPolicyConfig', + 'NodeConfig', + 'NodeManagement', + 'NodePool', + 'NodePoolAutoscaling', + 'NodeTaint', + 'Operation', + 'OperationProgress', + 'PrivateClusterConfig', + 'PrivateClusterMasterGlobalAccessConfig', + 'RecurringTimeWindow', + 'ReleaseChannel', + 'ReservationAffinity', + 'ResourceLimit', + 'ResourceUsageExportConfig', + 'RollbackNodePoolUpgradeRequest', + 'SandboxConfig', + 'ServerConfig', + 'SetAddonsConfigRequest', + 'SetLabelsRequest', + 'SetLegacyAbacRequest', + 'SetLocationsRequest', + 'SetLoggingServiceRequest', + 'SetMaintenancePolicyRequest', + 'SetMasterAuthRequest', + 'SetMonitoringServiceRequest', + 'SetNetworkPolicyRequest', + 'SetNodePoolAutoscalingRequest', + 'SetNodePoolManagementRequest', + 'SetNodePoolSizeRequest', + 'ShieldedInstanceConfig', + 'ShieldedNodes', + 'StartIPRotationRequest', + 'StatusCondition', + 'TimeWindow', + 'UpdateClusterRequest', + 'UpdateMasterRequest', + 'UpdateNodePoolRequest', + 'UsableSubnetwork', + 'UsableSubnetworkSecondaryRange', + 'VerticalPodAutoscaling', + 'WorkloadIdentityConfig', + 'WorkloadMetadataConfig', +) diff --git a/owl-bot-staging/v1/google/container_v1/types/cluster_service.py b/owl-bot-staging/v1/google/container_v1/types/cluster_service.py new file mode 100644 index 00000000..25e7f0b1 --- /dev/null +++ b/owl-bot-staging/v1/google/container_v1/types/cluster_service.py @@ -0,0 +1,5120 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.container.v1', + manifest={ + 'NodeConfig', + 'ShieldedInstanceConfig', + 'SandboxConfig', + 'ReservationAffinity', + 'NodeTaint', + 'MasterAuth', + 'ClientCertificateConfig', + 'AddonsConfig', + 'HttpLoadBalancing', + 'HorizontalPodAutoscaling', + 'KubernetesDashboard', + 'NetworkPolicyConfig', + 'DnsCacheConfig', + 'PrivateClusterMasterGlobalAccessConfig', + 'PrivateClusterConfig', + 'AuthenticatorGroupsConfig', + 'CloudRunConfig', + 'ConfigConnectorConfig', + 'MasterAuthorizedNetworksConfig', + 'LegacyAbac', + 'NetworkPolicy', + 'BinaryAuthorization', + 'IPAllocationPolicy', + 'Cluster', + 'ClusterUpdate', + 'Operation', + 'OperationProgress', + 'CreateClusterRequest', + 'GetClusterRequest', + 'UpdateClusterRequest', + 'UpdateNodePoolRequest', + 'SetNodePoolAutoscalingRequest', + 'SetLoggingServiceRequest', + 'SetMonitoringServiceRequest', + 'SetAddonsConfigRequest', + 'SetLocationsRequest', + 'UpdateMasterRequest', + 'SetMasterAuthRequest', + 'DeleteClusterRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'GetOperationRequest', + 'ListOperationsRequest', + 'CancelOperationRequest', + 'ListOperationsResponse', + 'GetServerConfigRequest', + 'ServerConfig', + 'CreateNodePoolRequest', + 'DeleteNodePoolRequest', + 'ListNodePoolsRequest', + 'GetNodePoolRequest', + 'NodePool', + 'NodeManagement', + 'AutoUpgradeOptions', + 'MaintenancePolicy', + 'MaintenanceWindow', + 'TimeWindow', + 'RecurringTimeWindow', + 'DailyMaintenanceWindow', + 'SetNodePoolManagementRequest', + 'SetNodePoolSizeRequest', + 'RollbackNodePoolUpgradeRequest', + 'ListNodePoolsResponse', + 'ClusterAutoscaling', + 'AutoprovisioningNodePoolDefaults', + 'ResourceLimit', + 'NodePoolAutoscaling', + 'SetLabelsRequest', + 'SetLegacyAbacRequest', + 'StartIPRotationRequest', + 'CompleteIPRotationRequest', + 'AcceleratorConfig', + 'WorkloadMetadataConfig', + 'SetNetworkPolicyRequest', + 'SetMaintenancePolicyRequest', + 'StatusCondition', + 'NetworkConfig', + 'GetOpenIDConfigRequest', + 'GetOpenIDConfigResponse', + 'GetJSONWebKeysRequest', + 'Jwk', + 'GetJSONWebKeysResponse', + 'ReleaseChannel', + 'IntraNodeVisibilityConfig', + 'MaxPodsConstraint', + 'WorkloadIdentityConfig', + 'DatabaseEncryption', + 'ListUsableSubnetworksRequest', + 'ListUsableSubnetworksResponse', + 'UsableSubnetworkSecondaryRange', + 'UsableSubnetwork', + 'ResourceUsageExportConfig', + 'VerticalPodAutoscaling', + 'DefaultSnatStatus', + 'ShieldedNodes', + }, +) + + +class NodeConfig(proto.Message): + r"""Parameters that describe the nodes in a cluster. + Attributes: + machine_type (str): + The name of a Google Compute Engine `machine + type `__ + + If unspecified, the default machine type is ``e2-medium``. + disk_size_gb (int): + Size of the disk attached to each node, + specified in GB. The smallest allowed disk size + is 10GB. + If unspecified, the default disk size is 100GB. + oauth_scopes (Sequence[str]): + The set of Google API scopes to be made available on all of + the node VMs under the "default" service account. + + The following scopes are recommended, but not required, and + by default are not included: + + - ``https://www.googleapis.com/auth/compute`` is required + for mounting persistent storage on your nodes. + - ``https://www.googleapis.com/auth/devstorage.read_only`` + is required for communicating with **gcr.io** (the + `Google Container + Registry `__). 
+ + If unspecified, no scopes are added, unless Cloud Logging or + Cloud Monitoring are enabled, in which case their required + scopes will be added. + service_account (str): + The Google Cloud Platform Service Account to + be used by the node VMs. Specify the email + address of the Service Account; otherwise, if no + Service Account is specified, the "default" + service account is used. + metadata (Sequence[google.container_v1.types.NodeConfig.MetadataEntry]): + The metadata key/value pairs assigned to instances in the + cluster. + + Keys must conform to the regexp ``[a-zA-Z0-9-_]+`` and be + less than 128 bytes in length. These are reflected as part + of a URL in the metadata server. Additionally, to avoid + ambiguity, keys must not conflict with any other metadata + keys for the project or be one of the reserved keys: + + - "cluster-location" + - "cluster-name" + - "cluster-uid" + - "configure-sh" + - "containerd-configure-sh" + - "enable-os-login" + - "gci-ensure-gke-docker" + - "gci-metrics-enabled" + - "gci-update-strategy" + - "instance-template" + - "kube-env" + - "startup-script" + - "user-data" + - "disable-address-manager" + - "windows-startup-script-ps1" + - "common-psm1" + - "k8s-node-setup-psm1" + - "install-ssh-psm1" + - "user-profile-psm1" + + The following keys are reserved for Windows nodes: + + - "serial-port-logging-enable" + + Values are free-form strings, and only have meaning as + interpreted by the image running in the instance. The only + restriction placed on them is that each value's size must be + less than or equal to 32 KB. + + The total size of all keys and values must be less than 512 + KB. + image_type (str): + The image type to use for this node. Note + that for a given image type, the latest version + of it will be used. + labels (Sequence[google.container_v1.types.NodeConfig.LabelsEntry]): + The map of Kubernetes labels (key/value + pairs) to be applied to each node. These will + added in addition to any default label(s) that + Kubernetes may apply to the node. + In case of conflict in label keys, the applied + set may differ depending on the Kubernetes + version -- it's best to assume the behavior is + undefined and conflicts should be avoided. + For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/overview/working- + with-objects/labels/ + local_ssd_count (int): + The number of local SSD disks to be attached + to the node. + The limit for this value is dependent upon the + maximum number of disks available on a machine + per zone. See: + https://cloud.google.com/compute/docs/disks/local- + ssd for more information. + tags (Sequence[str]): + The list of instance tags applied to all + nodes. Tags are used to identify valid sources + or targets for network firewalls and are + specified by the client during cluster or node + pool creation. Each tag within the list must + comply with RFC1035. + preemptible (bool): + Whether the nodes are created as preemptible + VM instances. See: + https://cloud.google.com/compute/docs/instances/preemptible + for more information about preemptible VM + instances. + accelerators (Sequence[google.container_v1.types.AcceleratorConfig]): + A list of hardware accelerators to be + attached to each node. See + https://cloud.google.com/compute/docs/gpus for + more information about support for GPUs. + disk_type (str): + Type of the disk attached to each node (e.g. 
+ 'pd-standard', 'pd-ssd' or 'pd-balanced') + + If unspecified, the default disk type is 'pd- + standard' + min_cpu_platform (str): + Minimum CPU platform to be used by this instance. The + instance may be scheduled on the specified or newer CPU + platform. Applicable values are the friendly names of CPU + platforms, such as ``minCpuPlatform: "Intel Haswell"`` or + ``minCpuPlatform: "Intel Sandy Bridge"``. For more + information, read `how to specify min CPU + platform `__ + workload_metadata_config (google.container_v1.types.WorkloadMetadataConfig): + The workload metadata configuration for this + node. + taints (Sequence[google.container_v1.types.NodeTaint]): + List of kubernetes taints to be applied to + each node. + For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/configuration/taint- + and-toleration/ + sandbox_config (google.container_v1.types.SandboxConfig): + Sandbox configuration for this node. + node_group (str): + Setting this field will assign instances of this pool to run + on the specified node group. This is useful for running + workloads on `sole tenant + nodes `__. + reservation_affinity (google.container_v1.types.ReservationAffinity): + The optional reservation affinity. Setting this field will + apply the specified `Zonal Compute + Reservation `__ + to this node pool. + shielded_instance_config (google.container_v1.types.ShieldedInstanceConfig): + Shielded Instance options. + boot_disk_kms_key (str): + The Customer Managed Encryption Key used to encrypt the boot + disk attached to each node in the node pool. This should be + of the form + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. + For more information about protecting resources with Cloud + KMS Keys please see: + https://cloud.google.com/compute/docs/disks/customer-managed-encryption + """ + + machine_type = proto.Field( + proto.STRING, + number=1, + ) + disk_size_gb = proto.Field( + proto.INT32, + number=2, + ) + oauth_scopes = proto.RepeatedField( + proto.STRING, + number=3, + ) + service_account = proto.Field( + proto.STRING, + number=9, + ) + metadata = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + image_type = proto.Field( + proto.STRING, + number=5, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + local_ssd_count = proto.Field( + proto.INT32, + number=7, + ) + tags = proto.RepeatedField( + proto.STRING, + number=8, + ) + preemptible = proto.Field( + proto.BOOL, + number=10, + ) + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=11, + message='AcceleratorConfig', + ) + disk_type = proto.Field( + proto.STRING, + number=12, + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=13, + ) + workload_metadata_config = proto.Field( + proto.MESSAGE, + number=14, + message='WorkloadMetadataConfig', + ) + taints = proto.RepeatedField( + proto.MESSAGE, + number=15, + message='NodeTaint', + ) + sandbox_config = proto.Field( + proto.MESSAGE, + number=17, + message='SandboxConfig', + ) + node_group = proto.Field( + proto.STRING, + number=18, + ) + reservation_affinity = proto.Field( + proto.MESSAGE, + number=19, + message='ReservationAffinity', + ) + shielded_instance_config = proto.Field( + proto.MESSAGE, + number=20, + message='ShieldedInstanceConfig', + ) + boot_disk_kms_key = proto.Field( + proto.STRING, + number=23, + ) + + +class ShieldedInstanceConfig(proto.Message): + r"""A set of Shielded Instance options. 
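The descriptors above make ``NodeConfig`` an ordinary proto-plus message keyed by the proto field numbers. A minimal sketch of populating it, with illustrative values rather than defaults:

    from google.container_v1 import types

    node_config = types.NodeConfig(
        machine_type="e2-standard-4",  # any Compute Engine machine type
        disk_size_gb=100,
        oauth_scopes=[
            # recommended scope for pulling images from gcr.io
            "https://www.googleapis.com/auth/devstorage.read_only",
        ],
        labels={"env": "staging"},     # map fields accept plain dicts
        preemptible=True,
    )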
+ Attributes: + enable_secure_boot (bool): + Defines whether the instance has Secure Boot + enabled. + Secure Boot helps ensure that the system only + runs authentic software by verifying the digital + signature of all boot components, and halting + the boot process if signature verification + fails. + enable_integrity_monitoring (bool): + Defines whether the instance has integrity + monitoring enabled. + Enables monitoring and attestation of the boot + integrity of the instance. The attestation is + performed against the integrity policy baseline. + This baseline is initially derived from the + implicitly trusted boot image when the instance + is created. + """ + + enable_secure_boot = proto.Field( + proto.BOOL, + number=1, + ) + enable_integrity_monitoring = proto.Field( + proto.BOOL, + number=2, + ) + + +class SandboxConfig(proto.Message): + r"""SandboxConfig contains configurations of the sandbox to use + for the node. + + Attributes: + type_ (google.container_v1.types.SandboxConfig.Type): + Type of the sandbox to use for the node. + """ + class Type(proto.Enum): + r"""Possible types of sandboxes.""" + UNSPECIFIED = 0 + GVISOR = 1 + + type_ = proto.Field( + proto.ENUM, + number=2, + enum=Type, + ) + + +class ReservationAffinity(proto.Message): + r"""`ReservationAffinity `__ + is the configuration of desired reservation which instances could + take capacity from. + + Attributes: + consume_reservation_type (google.container_v1.types.ReservationAffinity.Type): + Corresponds to the type of reservation + consumption. + key (str): + Corresponds to the label key of a reservation resource. To + target a SPECIFIC_RESERVATION by name, specify + "googleapis.com/reservation-name" as the key and specify the + name of your reservation as its value. + values (Sequence[str]): + Corresponds to the label value(s) of + reservation resource(s). + """ + class Type(proto.Enum): + r"""Indicates whether to consume capacity from a reservation or + not. + """ + UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + consume_reservation_type = proto.Field( + proto.ENUM, + number=1, + enum=Type, + ) + key = proto.Field( + proto.STRING, + number=2, + ) + values = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class NodeTaint(proto.Message): + r"""Kubernetes taint is comprised of three fields: key, value, and + effect. Effect can only be one of three types: NoSchedule, + PreferNoSchedule or NoExecute. + + See + `here `__ + for more information, including usage and the valid values. + + Attributes: + key (str): + Key for taint. + value (str): + Value for taint. + effect (google.container_v1.types.NodeTaint.Effect): + Effect for taint. + """ + class Effect(proto.Enum): + r"""Possible values for Effect in taint.""" + EFFECT_UNSPECIFIED = 0 + NO_SCHEDULE = 1 + PREFER_NO_SCHEDULE = 2 + NO_EXECUTE = 3 + + key = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + effect = proto.Field( + proto.ENUM, + number=3, + enum=Effect, + ) + + +class MasterAuth(proto.Message): + r"""The authentication information for accessing the master + endpoint. Authentication can be done using HTTP basic auth or + using client certificates. + + Attributes: + username (str): + The username to use for HTTP basic + authentication to the master endpoint. For + clusters v1.6.0 and later, basic authentication + can be disabled by leaving username unspecified + (or setting it to the empty string). 
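``SandboxConfig``, ``ReservationAffinity``, and ``NodeTaint`` all nest their enums on the message class, so enum members are reached through the enclosing type. A short sketch (the reservation name is hypothetical):

    from google.container_v1 import types

    taint = types.NodeTaint(
        key="dedicated",
        value="gpu",
        effect=types.NodeTaint.Effect.NO_SCHEDULE,  # nested enum
    )
    affinity = types.ReservationAffinity(
        consume_reservation_type=types.ReservationAffinity.Type.SPECIFIC_RESERVATION,
        key="googleapis.com/reservation-name",  # per the docstring above
        values=["my-reservation"],              # hypothetical reservation
    )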
+ Warning: basic authentication is deprecated, and + will be removed in GKE control plane versions + 1.19 and newer. For a list of recommended + authentication methods, see: + https://cloud.google.com/kubernetes- + engine/docs/how-to/api-server-authentication + password (str): + The password to use for HTTP basic + authentication to the master endpoint. Because + the master endpoint is open to the Internet, you + should create a strong password. If a password + is provided for cluster creation, username must + be non-empty. + + Warning: basic authentication is deprecated, and + will be removed in GKE control plane versions + 1.19 and newer. For a list of recommended + authentication methods, see: + https://cloud.google.com/kubernetes- + engine/docs/how-to/api-server-authentication + client_certificate_config (google.container_v1.types.ClientCertificateConfig): + Configuration for client certificate + authentication on the cluster. For clusters + before v1.12, if no configuration is specified, + a client certificate is issued. + cluster_ca_certificate (str): + [Output only] Base64-encoded public certificate that is the + root of trust for the cluster. + client_certificate (str): + [Output only] Base64-encoded public certificate used by + clients to authenticate to the cluster endpoint. + client_key (str): + [Output only] Base64-encoded private key used by clients to + authenticate to the cluster endpoint. + """ + + username = proto.Field( + proto.STRING, + number=1, + ) + password = proto.Field( + proto.STRING, + number=2, + ) + client_certificate_config = proto.Field( + proto.MESSAGE, + number=3, + message='ClientCertificateConfig', + ) + cluster_ca_certificate = proto.Field( + proto.STRING, + number=100, + ) + client_certificate = proto.Field( + proto.STRING, + number=101, + ) + client_key = proto.Field( + proto.STRING, + number=102, + ) + + +class ClientCertificateConfig(proto.Message): + r"""Configuration for client certificates on the cluster. + Attributes: + issue_client_certificate (bool): + Issue a client certificate. + """ + + issue_client_certificate = proto.Field( + proto.BOOL, + number=1, + ) + + +class AddonsConfig(proto.Message): + r"""Configuration for the addons that can be automatically spun + up in the cluster, enabling additional functionality. + + Attributes: + http_load_balancing (google.container_v1.types.HttpLoadBalancing): + Configuration for the HTTP (L7) load + balancing controller addon, which makes it easy + to set up HTTP load balancers for services in a + cluster. + horizontal_pod_autoscaling (google.container_v1.types.HorizontalPodAutoscaling): + Configuration for the horizontal pod + autoscaling feature, which increases or + decreases the number of replica pods a + replication controller has based on the resource + usage of the existing pods. + kubernetes_dashboard (google.container_v1.types.KubernetesDashboard): + Configuration for the Kubernetes Dashboard. + This addon is deprecated, and will be disabled + in 1.15. It is recommended to use the Cloud + Console to manage and monitor your Kubernetes + clusters, workloads and applications. For more + information, see: + https://cloud.google.com/kubernetes- + engine/docs/concepts/dashboards + network_policy_config (google.container_v1.types.NetworkPolicyConfig): + Configuration for NetworkPolicy. This only + tracks whether the addon is enabled or not on + the Master, it does not track whether network + policy is enabled for the nodes. 
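Given the deprecation warnings above, a current ``MasterAuth`` typically only controls client-certificate issuance; the certificate and key fields are output only and populated by the server. A minimal sketch:

    from google.container_v1 import types

    # Leave username/password unset (basic auth is deprecated) and
    # explicitly decline a client certificate.
    master_auth = types.MasterAuth(
        client_certificate_config=types.ClientCertificateConfig(
            issue_client_certificate=False,
        ),
    )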
+ cloud_run_config (google.container_v1.types.CloudRunConfig): + Configuration for the Cloud Run addon, which + allows the user to use a managed Knative + service. + dns_cache_config (google.container_v1.types.DnsCacheConfig): + Configuration for NodeLocalDNS, a dns cache + running on cluster nodes + config_connector_config (google.container_v1.types.ConfigConnectorConfig): + Configuration for the ConfigConnector add-on, + a Kubernetes extension to manage hosted GCP + services through the Kubernetes API + """ + + http_load_balancing = proto.Field( + proto.MESSAGE, + number=1, + message='HttpLoadBalancing', + ) + horizontal_pod_autoscaling = proto.Field( + proto.MESSAGE, + number=2, + message='HorizontalPodAutoscaling', + ) + kubernetes_dashboard = proto.Field( + proto.MESSAGE, + number=3, + message='KubernetesDashboard', + ) + network_policy_config = proto.Field( + proto.MESSAGE, + number=4, + message='NetworkPolicyConfig', + ) + cloud_run_config = proto.Field( + proto.MESSAGE, + number=7, + message='CloudRunConfig', + ) + dns_cache_config = proto.Field( + proto.MESSAGE, + number=8, + message='DnsCacheConfig', + ) + config_connector_config = proto.Field( + proto.MESSAGE, + number=10, + message='ConfigConnectorConfig', + ) + + +class HttpLoadBalancing(proto.Message): + r"""Configuration options for the HTTP (L7) load balancing + controller addon, which makes it easy to set up HTTP load + balancers for services in a cluster. + + Attributes: + disabled (bool): + Whether the HTTP Load Balancing controller is + enabled in the cluster. When enabled, it runs a + small pod in the cluster that manages the load + balancers. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class HorizontalPodAutoscaling(proto.Message): + r"""Configuration options for the horizontal pod autoscaling + feature, which increases or decreases the number of replica pods + a replication controller has based on the resource usage of the + existing pods. + + Attributes: + disabled (bool): + Whether the Horizontal Pod Autoscaling + feature is enabled in the cluster. When enabled, + it ensures that metrics are collected into + Stackdriver Monitoring. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class KubernetesDashboard(proto.Message): + r"""Configuration for the Kubernetes Dashboard. + Attributes: + disabled (bool): + Whether the Kubernetes Dashboard is enabled + for this cluster. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class NetworkPolicyConfig(proto.Message): + r"""Configuration for NetworkPolicy. This only tracks whether the + addon is enabled or not on the Master, it does not track whether + network policy is enabled for the nodes. + + Attributes: + disabled (bool): + Whether NetworkPolicy is enabled for this + cluster. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class DnsCacheConfig(proto.Message): + r"""Configuration for NodeLocal DNSCache + Attributes: + enabled (bool): + Whether NodeLocal DNSCache is enabled for + this cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class PrivateClusterMasterGlobalAccessConfig(proto.Message): + r"""Configuration for controlling master global access settings. + Attributes: + enabled (bool): + Whenever master is accessible globally or + not. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class PrivateClusterConfig(proto.Message): + r"""Configuration options for private clusters. 
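Note the naming asymmetry among the addon messages: the older ones (``HttpLoadBalancing``, ``HorizontalPodAutoscaling``, ``KubernetesDashboard``, ``NetworkPolicyConfig``) expose a ``disabled`` flag, while the newer ones (``DnsCacheConfig``, ``ConfigConnectorConfig``) expose ``enabled``. A sketch that switches the common addons on:

    from google.container_v1 import types

    addons = types.AddonsConfig(
        http_load_balancing=types.HttpLoadBalancing(disabled=False),
        horizontal_pod_autoscaling=types.HorizontalPodAutoscaling(disabled=False),
        dns_cache_config=types.DnsCacheConfig(enabled=True),  # enabled, not disabled
    )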
+ Attributes: + enable_private_nodes (bool): + Whether nodes have internal IP addresses + only. If enabled, all nodes are given only RFC + 1918 private addresses and communicate with the + master via private networking. + enable_private_endpoint (bool): + Whether the master's internal IP address is + used as the cluster endpoint. + master_ipv4_cidr_block (str): + The IP range in CIDR notation to use for the + hosted master network. This range will be used + for assigning internal IP addresses to the + master or set of masters, as well as the ILB + VIP. This range must not overlap with any other + ranges in use within the cluster's network. + private_endpoint (str): + Output only. The internal IP address of this + cluster's master endpoint. + public_endpoint (str): + Output only. The external IP address of this + cluster's master endpoint. + peering_name (str): + Output only. The peering name in the customer + VPC used by this cluster. + master_global_access_config (google.container_v1.types.PrivateClusterMasterGlobalAccessConfig): + Controls master global access settings. + """ + + enable_private_nodes = proto.Field( + proto.BOOL, + number=1, + ) + enable_private_endpoint = proto.Field( + proto.BOOL, + number=2, + ) + master_ipv4_cidr_block = proto.Field( + proto.STRING, + number=3, + ) + private_endpoint = proto.Field( + proto.STRING, + number=4, + ) + public_endpoint = proto.Field( + proto.STRING, + number=5, + ) + peering_name = proto.Field( + proto.STRING, + number=7, + ) + master_global_access_config = proto.Field( + proto.MESSAGE, + number=8, + message='PrivateClusterMasterGlobalAccessConfig', + ) + + +class AuthenticatorGroupsConfig(proto.Message): + r"""Configuration for returning group information from + authenticators. + + Attributes: + enabled (bool): + Whether this cluster should return group + membership lookups during authentication using a + group of security groups. + security_group (str): + The name of the security group-of-groups to + be used. Only relevant if enabled = true. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + security_group = proto.Field( + proto.STRING, + number=2, + ) + + +class CloudRunConfig(proto.Message): + r"""Configuration options for the Cloud Run feature. + Attributes: + disabled (bool): + Whether Cloud Run addon is enabled for this + cluster. + load_balancer_type (google.container_v1.types.CloudRunConfig.LoadBalancerType): + Which load balancer type is installed for + Cloud Run. + """ + class LoadBalancerType(proto.Enum): + r"""Load balancer type of ingress service of Cloud Run.""" + LOAD_BALANCER_TYPE_UNSPECIFIED = 0 + LOAD_BALANCER_TYPE_EXTERNAL = 1 + LOAD_BALANCER_TYPE_INTERNAL = 2 + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + load_balancer_type = proto.Field( + proto.ENUM, + number=3, + enum=LoadBalancerType, + ) + + +class ConfigConnectorConfig(proto.Message): + r"""Configuration options for the Config Connector add-on. + Attributes: + enabled (bool): + Whether Cloud Connector is enabled for this + cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class MasterAuthorizedNetworksConfig(proto.Message): + r"""Configuration options for the master authorized networks + feature. Enabled master authorized networks will disallow all + external traffic to access Kubernetes master through HTTPS + except traffic from the given CIDR blocks, Google Compute Engine + Public IPs and Google Prod IPs. + + Attributes: + enabled (bool): + Whether or not master authorized networks is + enabled. 
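A sketch of the private-cluster settings defined above; the CIDR is illustrative and, per the docstring, must not overlap any other range in use within the cluster's network:

    from google.container_v1 import types

    private_config = types.PrivateClusterConfig(
        enable_private_nodes=True,
        master_ipv4_cidr_block="172.16.0.0/28",  # hypothetical /28
        master_global_access_config=types.PrivateClusterMasterGlobalAccessConfig(
            enabled=True,
        ),
    )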
+ cidr_blocks (Sequence[google.container_v1.types.MasterAuthorizedNetworksConfig.CidrBlock]): + cidr_blocks define up to 50 external networks that could + access Kubernetes master through HTTPS. + """ + + class CidrBlock(proto.Message): + r"""CidrBlock contains an optional name and one CIDR block. + Attributes: + display_name (str): + display_name is an optional field for users to identify CIDR + blocks. + cidr_block (str): + cidr_block must be specified in CIDR notation. + """ + + display_name = proto.Field( + proto.STRING, + number=1, + ) + cidr_block = proto.Field( + proto.STRING, + number=2, + ) + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + cidr_blocks = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=CidrBlock, + ) + + +class LegacyAbac(proto.Message): + r"""Configuration for the legacy Attribute Based Access Control + authorization mode. + + Attributes: + enabled (bool): + Whether the ABAC authorizer is enabled for + this cluster. When enabled, identities in the + system, including service accounts, nodes, and + controllers, will have statically granted + permissions beyond those provided by the RBAC + configuration or IAM. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class NetworkPolicy(proto.Message): + r"""Configuration options for the NetworkPolicy feature. + https://kubernetes.io/docs/concepts/services- + networking/networkpolicies/ + + Attributes: + provider (google.container_v1.types.NetworkPolicy.Provider): + The selected network policy provider. + enabled (bool): + Whether network policy is enabled on the + cluster. + """ + class Provider(proto.Enum): + r"""Allowed Network Policy providers.""" + PROVIDER_UNSPECIFIED = 0 + CALICO = 1 + + provider = proto.Field( + proto.ENUM, + number=1, + enum=Provider, + ) + enabled = proto.Field( + proto.BOOL, + number=2, + ) + + +class BinaryAuthorization(proto.Message): + r"""Configuration for Binary Authorization. + Attributes: + enabled (bool): + Enable Binary Authorization for this cluster. + If enabled, all container images will be + validated by Binary Authorization. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class IPAllocationPolicy(proto.Message): + r"""Configuration for controlling how IPs are allocated in the + cluster. + + Attributes: + use_ip_aliases (bool): + Whether alias IPs will be used for pod IPs in the cluster. + This is used in conjunction with use_routes. It cannot be + true if use_routes is true. If both use_ip_aliases and + use_routes are false, then the server picks the default IP + allocation mode + create_subnetwork (bool): + Whether a new subnetwork will be created automatically for + the cluster. + + This field is only applicable when ``use_ip_aliases`` is + true. + subnetwork_name (str): + A custom subnetwork name to be used if ``create_subnetwork`` + is true. If this field is empty, then an automatic name will + be chosen for the new subnetwork. + cluster_ipv4_cidr (str): + This field is deprecated, use cluster_ipv4_cidr_block. + node_ipv4_cidr (str): + This field is deprecated, use node_ipv4_cidr_block. + services_ipv4_cidr (str): + This field is deprecated, use services_ipv4_cidr_block. + cluster_secondary_range_name (str): + The name of the secondary range to be used for the cluster + CIDR block. The secondary range will be used for pod IP + addresses. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases is true + and create_subnetwork is false. 
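``CidrBlock`` is declared inside ``MasterAuthorizedNetworksConfig``, so it is addressed through the enclosing class. A sketch using a documentation address range:

    from google.container_v1 import types

    authorized_networks = types.MasterAuthorizedNetworksConfig(
        enabled=True,
        cidr_blocks=[
            types.MasterAuthorizedNetworksConfig.CidrBlock(
                display_name="office",        # hypothetical label
                cidr_block="203.0.113.0/24",  # RFC 5737 example range
            ),
        ],
    )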
+ services_secondary_range_name (str): + The name of the secondary range to be used as for the + services CIDR block. The secondary range will be used for + service ClusterIPs. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases is true + and create_subnetwork is false. + cluster_ipv4_cidr_block (str): + The IP address range for the cluster pod IPs. If this field + is set, then ``cluster.cluster_ipv4_cidr`` must be left + blank. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + node_ipv4_cidr_block (str): + The IP address range of the instance IPs in this cluster. + + This is applicable only if ``create_subnetwork`` is true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + services_ipv4_cidr_block (str): + The IP address range of the services IPs in this cluster. If + blank, a range will be automatically chosen with the default + size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + tpu_ipv4_cidr_block (str): + The IP address range of the Cloud TPUs in this cluster. If + unspecified, a range will be automatically chosen with the + default size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + If unspecified, the range will use the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + use_routes (bool): + Whether routes will be used for pod IPs in the cluster. This + is used in conjunction with use_ip_aliases. It cannot be + true if use_ip_aliases is true. 
If both use_ip_aliases and + use_routes are false, then the server picks the default IP + allocation mode + """ + + use_ip_aliases = proto.Field( + proto.BOOL, + number=1, + ) + create_subnetwork = proto.Field( + proto.BOOL, + number=2, + ) + subnetwork_name = proto.Field( + proto.STRING, + number=3, + ) + cluster_ipv4_cidr = proto.Field( + proto.STRING, + number=4, + ) + node_ipv4_cidr = proto.Field( + proto.STRING, + number=5, + ) + services_ipv4_cidr = proto.Field( + proto.STRING, + number=6, + ) + cluster_secondary_range_name = proto.Field( + proto.STRING, + number=7, + ) + services_secondary_range_name = proto.Field( + proto.STRING, + number=8, + ) + cluster_ipv4_cidr_block = proto.Field( + proto.STRING, + number=9, + ) + node_ipv4_cidr_block = proto.Field( + proto.STRING, + number=10, + ) + services_ipv4_cidr_block = proto.Field( + proto.STRING, + number=11, + ) + tpu_ipv4_cidr_block = proto.Field( + proto.STRING, + number=13, + ) + use_routes = proto.Field( + proto.BOOL, + number=15, + ) + + +class Cluster(proto.Message): + r"""A Google Kubernetes Engine cluster. + Attributes: + name (str): + The name of this cluster. The name must be unique within + this project and location (e.g. zone or region), and can be + up to 40 characters with the following restrictions: + + - Lowercase letters, numbers, and hyphens only. + - Must start with a letter. + - Must end with a number or a letter. + description (str): + An optional description of this cluster. + initial_node_count (int): + The number of nodes to create in this cluster. You must + ensure that your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. For requests, this + field should only be used in lieu of a "node_pool" object, + since this configuration (along with the "node_config") will + be used to create a "NodePool" object with an auto-generated + name. Do not use this and a node_pool at the same time. + + This field is deprecated, use node_pool.initial_node_count + instead. + node_config (google.container_v1.types.NodeConfig): + Parameters used in creating the cluster's nodes. For + requests, this field should only be used in lieu of a + "node_pool" object, since this configuration (along with the + "initial_node_count") will be used to create a "NodePool" + object with an auto-generated name. Do not use this and a + node_pool at the same time. For responses, this field will + be populated with the node configuration of the first node + pool. (For configuration of each node pool, see + ``node_pool.config``) + + If unspecified, the defaults are used. This field is + deprecated, use node_pool.config instead. + master_auth (google.container_v1.types.MasterAuth): + The authentication information for accessing the master + endpoint. If unspecified, the defaults are used: For + clusters before v1.12, if master_auth is unspecified, + ``username`` will be set to "admin", a random password will + be generated, and a client certificate will be issued. + logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud Logging + service with a Kubernetes-native resource model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. 
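As the docstring spells out, ``use_ip_aliases`` and ``use_routes`` are mutually exclusive, and each CIDR field accepts a full range, a bare netmask, or blank. A sketch of a VPC-native policy that lets the server pick the concrete ranges:

    from google.container_v1 import types

    ip_policy = types.IPAllocationPolicy(
        use_ip_aliases=True,             # leaves use_routes False
        cluster_ipv4_cidr_block="/14",   # netmask only: server picks the range
        services_ipv4_cidr_block="/20",
    )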
+ + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be used + for GKE 1.14+ or ``logging.googleapis.com`` for earlier + versions. + monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE 1.15). + - ``none`` - No metrics will be exported from the cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``monitoring.googleapis.com`` for + earlier versions. + network (str): + The name of the Google Compute Engine + `network `__ + to which the cluster is connected. If left unspecified, the + ``default`` network will be used. + cluster_ipv4_cidr (str): + The IP address range of the container pods in this cluster, + in + `CIDR `__ + notation (e.g. ``10.96.0.0/14``). Leave blank to have one + automatically chosen or specify a ``/14`` block in + ``10.0.0.0/8``. + addons_config (google.container_v1.types.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + subnetwork (str): + The name of the Google Compute Engine + `subnetwork `__ + to which the cluster is connected. + node_pools (Sequence[google.container_v1.types.NodePool]): + The node pools associated with this cluster. This field + should not be set if "node_config" or "initial_node_count" + are specified. + locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. + + This field provides a default value if + `NodePool.Locations `__ + are not specified during node pool creation. + + Warning: changing cluster locations will update the + `NodePool.Locations `__ + of all node pools and will result in nodes being added + and/or removed. + enable_kubernetes_alpha (bool): + Kubernetes alpha features are enabled on this + cluster. This includes alpha API groups (e.g. + v1alpha1) and features that may not be + production ready in the kubernetes version of + the master and nodes. The cluster has no SLA for + uptime and master/node upgrades are disabled. + Alpha enabled clusters are automatically deleted + thirty days after creation. + resource_labels (Sequence[google.container_v1.types.Cluster.ResourceLabelsEntry]): + The resource labels for the cluster to use to + annotate any related Google Compute Engine + resources. + label_fingerprint (str): + The fingerprint of the set of labels for this + cluster. + legacy_abac (google.container_v1.types.LegacyAbac): + Configuration for the legacy ABAC + authorization mode. + network_policy (google.container_v1.types.NetworkPolicy): + Configuration options for the NetworkPolicy + feature. + ip_allocation_policy (google.container_v1.types.IPAllocationPolicy): + Configuration for cluster IP allocation. + master_authorized_networks_config (google.container_v1.types.MasterAuthorizedNetworksConfig): + The configuration options for master + authorized networks feature. + maintenance_policy (google.container_v1.types.MaintenancePolicy): + Configure the maintenance policy for this + cluster. + binary_authorization (google.container_v1.types.BinaryAuthorization): + Configuration for Binary Authorization. + autoscaling (google.container_v1.types.ClusterAutoscaling): + Cluster-level autoscaling configuration. 
+ network_config (google.container_v1.types.NetworkConfig): + Configuration for cluster networking. + default_max_pods_constraint (google.container_v1.types.MaxPodsConstraint): + The default constraint on the maximum number + of pods that can be run simultaneously on a node + in the node pool of this cluster. Only honored + if cluster created with IP Alias support. + resource_usage_export_config (google.container_v1.types.ResourceUsageExportConfig): + Configuration for exporting resource usages. + Resource usage export is disabled when this + config is unspecified. + authenticator_groups_config (google.container_v1.types.AuthenticatorGroupsConfig): + Configuration controlling RBAC group + membership information. + private_cluster_config (google.container_v1.types.PrivateClusterConfig): + Configuration for private cluster. + database_encryption (google.container_v1.types.DatabaseEncryption): + Configuration of etcd encryption. + vertical_pod_autoscaling (google.container_v1.types.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + shielded_nodes (google.container_v1.types.ShieldedNodes): + Shielded Nodes configuration. + release_channel (google.container_v1.types.ReleaseChannel): + Release channel configuration. + workload_identity_config (google.container_v1.types.WorkloadIdentityConfig): + Configuration for the use of Kubernetes + Service Accounts in GCP IAM policies. + self_link (str): + [Output only] Server-defined URL for the resource. + zone (str): + [Output only] The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field is deprecated, use + location instead. + endpoint (str): + [Output only] The IP address of this cluster's master + endpoint. The endpoint can be accessed from the internet at + ``https://username:password@endpoint/``. + + See the ``masterAuth`` property of this resource for + username and password information. + initial_cluster_version (str): + The initial Kubernetes version for this + cluster. Valid versions are those found in + validMasterVersions returned by getServerConfig. + The version can be upgraded over time; such + upgrades are reflected in currentMasterVersion + and currentNodeVersion. + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "","-": picks the default + Kubernetes version + current_master_version (str): + [Output only] The current software version of the master + endpoint. + current_node_version (str): + [Output only] Deprecated, use + `NodePools.version `__ + instead. The current version of the node software + components. If they are currently at multiple versions + because they're in the process of being upgraded, this + reflects the minimum version of all nodes. + create_time (str): + [Output only] The time the cluster was created, in + `RFC3339 `__ text + format. + status (google.container_v1.types.Cluster.Status): + [Output only] The current status of this cluster. + status_message (str): + [Output only] Deprecated. Use conditions instead. Additional + information about the current status of this cluster, if + available. + node_ipv4_cidr_size (int): + [Output only] The size of the address space on each node for + hosting containers. 
This is provisioned from within the + ``container_ipv4_cidr`` range. This field will only be set + when cluster is in route-based network mode. + services_ipv4_cidr (str): + [Output only] The IP address range of the Kubernetes + services in this cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). Service addresses are + typically put in the last ``/16`` from the container CIDR. + instance_group_urls (Sequence[str]): + Deprecated. Use node_pools.instance_group_urls. + current_node_count (int): + [Output only] The number of nodes currently in the cluster. + Deprecated. Call Kubernetes API directly to retrieve node + information. + expire_time (str): + [Output only] The time the cluster will be automatically + deleted in + `RFC3339 `__ text + format. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + enable_tpu (bool): + Enable the ability to use Cloud TPUs in this + cluster. + tpu_ipv4_cidr_block (str): + [Output only] The IP address range of the Cloud TPUs in this + cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). + conditions (Sequence[google.container_v1.types.StatusCondition]): + Which conditions caused the current cluster + state. + """ + class Status(proto.Enum): + r"""The current status of the cluster.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RECONCILING = 3 + STOPPING = 4 + ERROR = 5 + DEGRADED = 6 + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + initial_node_count = proto.Field( + proto.INT32, + number=3, + ) + node_config = proto.Field( + proto.MESSAGE, + number=4, + message='NodeConfig', + ) + master_auth = proto.Field( + proto.MESSAGE, + number=5, + message='MasterAuth', + ) + logging_service = proto.Field( + proto.STRING, + number=6, + ) + monitoring_service = proto.Field( + proto.STRING, + number=7, + ) + network = proto.Field( + proto.STRING, + number=8, + ) + cluster_ipv4_cidr = proto.Field( + proto.STRING, + number=9, + ) + addons_config = proto.Field( + proto.MESSAGE, + number=10, + message='AddonsConfig', + ) + subnetwork = proto.Field( + proto.STRING, + number=11, + ) + node_pools = proto.RepeatedField( + proto.MESSAGE, + number=12, + message='NodePool', + ) + locations = proto.RepeatedField( + proto.STRING, + number=13, + ) + enable_kubernetes_alpha = proto.Field( + proto.BOOL, + number=14, + ) + resource_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=16, + ) + legacy_abac = proto.Field( + proto.MESSAGE, + number=18, + message='LegacyAbac', + ) + network_policy = proto.Field( + proto.MESSAGE, + number=19, + message='NetworkPolicy', + ) + ip_allocation_policy = proto.Field( + proto.MESSAGE, + number=20, + message='IPAllocationPolicy', + ) + master_authorized_networks_config = proto.Field( + proto.MESSAGE, + number=22, + message='MasterAuthorizedNetworksConfig', + ) + maintenance_policy = proto.Field( + proto.MESSAGE, + number=23, + message='MaintenancePolicy', + ) + binary_authorization = proto.Field( + proto.MESSAGE, + number=24, + message='BinaryAuthorization', + ) + autoscaling = proto.Field( + proto.MESSAGE, + number=26, + message='ClusterAutoscaling', + ) + network_config = proto.Field( + proto.MESSAGE, + number=27, + message='NetworkConfig', + ) + default_max_pods_constraint = proto.Field( + proto.MESSAGE, + number=30, + message='MaxPodsConstraint', + ) + resource_usage_export_config = 
proto.Field( + proto.MESSAGE, + number=33, + message='ResourceUsageExportConfig', + ) + authenticator_groups_config = proto.Field( + proto.MESSAGE, + number=34, + message='AuthenticatorGroupsConfig', + ) + private_cluster_config = proto.Field( + proto.MESSAGE, + number=37, + message='PrivateClusterConfig', + ) + database_encryption = proto.Field( + proto.MESSAGE, + number=38, + message='DatabaseEncryption', + ) + vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, + number=39, + message='VerticalPodAutoscaling', + ) + shielded_nodes = proto.Field( + proto.MESSAGE, + number=40, + message='ShieldedNodes', + ) + release_channel = proto.Field( + proto.MESSAGE, + number=41, + message='ReleaseChannel', + ) + workload_identity_config = proto.Field( + proto.MESSAGE, + number=43, + message='WorkloadIdentityConfig', + ) + self_link = proto.Field( + proto.STRING, + number=100, + ) + zone = proto.Field( + proto.STRING, + number=101, + ) + endpoint = proto.Field( + proto.STRING, + number=102, + ) + initial_cluster_version = proto.Field( + proto.STRING, + number=103, + ) + current_master_version = proto.Field( + proto.STRING, + number=104, + ) + current_node_version = proto.Field( + proto.STRING, + number=105, + ) + create_time = proto.Field( + proto.STRING, + number=106, + ) + status = proto.Field( + proto.ENUM, + number=107, + enum=Status, + ) + status_message = proto.Field( + proto.STRING, + number=108, + ) + node_ipv4_cidr_size = proto.Field( + proto.INT32, + number=109, + ) + services_ipv4_cidr = proto.Field( + proto.STRING, + number=110, + ) + instance_group_urls = proto.RepeatedField( + proto.STRING, + number=111, + ) + current_node_count = proto.Field( + proto.INT32, + number=112, + ) + expire_time = proto.Field( + proto.STRING, + number=113, + ) + location = proto.Field( + proto.STRING, + number=114, + ) + enable_tpu = proto.Field( + proto.BOOL, + number=115, + ) + tpu_ipv4_cidr_block = proto.Field( + proto.STRING, + number=116, + ) + conditions = proto.RepeatedField( + proto.MESSAGE, + number=118, + message='StatusCondition', + ) + + +class ClusterUpdate(proto.Message): + r"""ClusterUpdate describes an update to the cluster. Exactly one + update can be applied to a cluster with each request, so at most + one field can be provided. + + Attributes: + desired_node_version (str): + The Kubernetes version to change the nodes to + (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + desired_monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE 1.15). + - ``none`` - No metrics will be exported from the cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``monitoring.googleapis.com`` for + earlier versions. 
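With ``Cluster`` fully declared, creating one goes through the ``ClusterManagerClient`` generated elsewhere in this patch. A minimal sketch, assuming Application Default Credentials and a hypothetical project and location; note the RPC returns the server-side ``Operation`` message defined below, not a client-side LRO wrapper:

    from google.container_v1 import ClusterManagerClient, types

    client = ClusterManagerClient()
    op = client.create_cluster(
        request=types.CreateClusterRequest(
            parent="projects/my-project/locations/us-central1",  # hypothetical
            cluster=types.Cluster(
                name="demo-cluster",
                node_pools=[
                    types.NodePool(name="default-pool", initial_node_count=3),
                ],
            ),
        )
    )
    print(op.name, op.status)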
+ desired_addons_config (google.container_v1.types.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + desired_node_pool_id (str): + The node pool to be upgraded. This field is mandatory if + "desired_node_version", "desired_image_family" or + "desired_node_pool_autoscaling" is specified and there is + more than one node pool on the cluster. + desired_image_type (str): + The desired image type for the node pool. NOTE: Set the + "desired_node_pool" field as well. + desired_database_encryption (google.container_v1.types.DatabaseEncryption): + Configuration of etcd encryption. + desired_workload_identity_config (google.container_v1.types.WorkloadIdentityConfig): + Configuration for Workload Identity. + desired_shielded_nodes (google.container_v1.types.ShieldedNodes): + Configuration for Shielded Nodes. + desired_node_pool_autoscaling (google.container_v1.types.NodePoolAutoscaling): + Autoscaler configuration for the node pool specified in + desired_node_pool_id. If there is only one pool in the + cluster and desired_node_pool_id is not provided then the + change applies to that single node pool. + desired_locations (Sequence[str]): + The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. + + This list must always include the cluster's primary zone. + + Warning: changing cluster locations will update the + locations of all node pools and will result in nodes being + added and/or removed. + desired_master_authorized_networks_config (google.container_v1.types.MasterAuthorizedNetworksConfig): + The desired configuration options for master + authorized networks feature. + desired_cluster_autoscaling (google.container_v1.types.ClusterAutoscaling): + Cluster-level autoscaling configuration. + desired_binary_authorization (google.container_v1.types.BinaryAuthorization): + The desired configuration options for the + Binary Authorization feature. + desired_logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud Logging + service with a Kubernetes-native resource model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be used + for GKE 1.14+ or ``logging.googleapis.com`` for earlier + versions. + desired_resource_usage_export_config (google.container_v1.types.ResourceUsageExportConfig): + The desired configuration for exporting + resource usage. + desired_vertical_pod_autoscaling (google.container_v1.types.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + desired_private_cluster_config (google.container_v1.types.PrivateClusterConfig): + The desired private cluster configuration. + desired_intra_node_visibility_config (google.container_v1.types.IntraNodeVisibilityConfig): + The desired config of Intra-node visibility. + desired_default_snat_status (google.container_v1.types.DefaultSnatStatus): + The desired status of whether to disable + default sNAT for this cluster. + desired_release_channel (google.container_v1.types.ReleaseChannel): + The desired release channel configuration. + desired_authenticator_groups_config (google.container_v1.types.AuthenticatorGroupsConfig): + The desired authenticator groups config for + the cluster. 
+ desired_master_version (str): + The Kubernetes version to change the master + to. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + """ + + desired_node_version = proto.Field( + proto.STRING, + number=4, + ) + desired_monitoring_service = proto.Field( + proto.STRING, + number=5, + ) + desired_addons_config = proto.Field( + proto.MESSAGE, + number=6, + message='AddonsConfig', + ) + desired_node_pool_id = proto.Field( + proto.STRING, + number=7, + ) + desired_image_type = proto.Field( + proto.STRING, + number=8, + ) + desired_database_encryption = proto.Field( + proto.MESSAGE, + number=46, + message='DatabaseEncryption', + ) + desired_workload_identity_config = proto.Field( + proto.MESSAGE, + number=47, + message='WorkloadIdentityConfig', + ) + desired_shielded_nodes = proto.Field( + proto.MESSAGE, + number=48, + message='ShieldedNodes', + ) + desired_node_pool_autoscaling = proto.Field( + proto.MESSAGE, + number=9, + message='NodePoolAutoscaling', + ) + desired_locations = proto.RepeatedField( + proto.STRING, + number=10, + ) + desired_master_authorized_networks_config = proto.Field( + proto.MESSAGE, + number=12, + message='MasterAuthorizedNetworksConfig', + ) + desired_cluster_autoscaling = proto.Field( + proto.MESSAGE, + number=15, + message='ClusterAutoscaling', + ) + desired_binary_authorization = proto.Field( + proto.MESSAGE, + number=16, + message='BinaryAuthorization', + ) + desired_logging_service = proto.Field( + proto.STRING, + number=19, + ) + desired_resource_usage_export_config = proto.Field( + proto.MESSAGE, + number=21, + message='ResourceUsageExportConfig', + ) + desired_vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, + number=22, + message='VerticalPodAutoscaling', + ) + desired_private_cluster_config = proto.Field( + proto.MESSAGE, + number=25, + message='PrivateClusterConfig', + ) + desired_intra_node_visibility_config = proto.Field( + proto.MESSAGE, + number=26, + message='IntraNodeVisibilityConfig', + ) + desired_default_snat_status = proto.Field( + proto.MESSAGE, + number=28, + message='DefaultSnatStatus', + ) + desired_release_channel = proto.Field( + proto.MESSAGE, + number=31, + message='ReleaseChannel', + ) + desired_authenticator_groups_config = proto.Field( + proto.MESSAGE, + number=63, + message='AuthenticatorGroupsConfig', + ) + desired_master_version = proto.Field( + proto.STRING, + number=100, + ) + + +class Operation(proto.Message): + r"""This operation resource represents operations that may have + happened or are happening on the cluster. All fields are output + only. + + Attributes: + name (str): + The server-assigned ID for the operation. + zone (str): + The name of the Google Compute Engine + `zone `__ + in which the operation is taking place. This field is + deprecated, use location instead. + operation_type (google.container_v1.types.Operation.Type): + The operation type. + status (google.container_v1.types.Operation.Status): + The current status of the operation. + detail (str): + Detailed operation progress, if available. + status_message (str): + Output only. If an error has occurred, a + textual description of the error. 
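Because a ``ClusterUpdate`` carries exactly one change per request, updating several settings means issuing several ``UpdateCluster`` calls. A sketch of a single master upgrade, with hypothetical resource names:

    from google.container_v1 import ClusterManagerClient, types

    client = ClusterManagerClient()
    op = client.update_cluster(
        request=types.UpdateClusterRequest(
            name="projects/my-project/locations/us-central1/clusters/demo-cluster",
            update=types.ClusterUpdate(desired_master_version="latest"),
        )
    )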
+ self_link (str): + Server-defined URL for the resource. + target_link (str): + Server-defined URL for the target of the + operation. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + start_time (str): + [Output only] The time the operation started, in + `RFC3339 `__ text + format. + end_time (str): + [Output only] The time the operation completed, in + `RFC3339 `__ text + format. + progress (google.container_v1.types.OperationProgress): + Output only. [Output only] Progress information for an + operation. + cluster_conditions (Sequence[google.container_v1.types.StatusCondition]): + Which conditions caused the current cluster + state. + nodepool_conditions (Sequence[google.container_v1.types.StatusCondition]): + Which conditions caused the current node pool + state. + """ + class Status(proto.Enum): + r"""Current status of the operation.""" + STATUS_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + ABORTING = 4 + + class Type(proto.Enum): + r"""Operation type.""" + TYPE_UNSPECIFIED = 0 + CREATE_CLUSTER = 1 + DELETE_CLUSTER = 2 + UPGRADE_MASTER = 3 + UPGRADE_NODES = 4 + REPAIR_CLUSTER = 5 + UPDATE_CLUSTER = 6 + CREATE_NODE_POOL = 7 + DELETE_NODE_POOL = 8 + SET_NODE_POOL_MANAGEMENT = 9 + AUTO_REPAIR_NODES = 10 + AUTO_UPGRADE_NODES = 11 + SET_LABELS = 12 + SET_MASTER_AUTH = 13 + SET_NODE_POOL_SIZE = 14 + SET_NETWORK_POLICY = 15 + SET_MAINTENANCE_POLICY = 16 + + name = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + operation_type = proto.Field( + proto.ENUM, + number=3, + enum=Type, + ) + status = proto.Field( + proto.ENUM, + number=4, + enum=Status, + ) + detail = proto.Field( + proto.STRING, + number=8, + ) + status_message = proto.Field( + proto.STRING, + number=5, + ) + self_link = proto.Field( + proto.STRING, + number=6, + ) + target_link = proto.Field( + proto.STRING, + number=7, + ) + location = proto.Field( + proto.STRING, + number=9, + ) + start_time = proto.Field( + proto.STRING, + number=10, + ) + end_time = proto.Field( + proto.STRING, + number=11, + ) + progress = proto.Field( + proto.MESSAGE, + number=12, + message='OperationProgress', + ) + cluster_conditions = proto.RepeatedField( + proto.MESSAGE, + number=13, + message='StatusCondition', + ) + nodepool_conditions = proto.RepeatedField( + proto.MESSAGE, + number=14, + message='StatusCondition', + ) + + +class OperationProgress(proto.Message): + r"""Information about operation (or operation stage) progress. + Attributes: + name (str): + A non-parameterized string describing an + operation stage. Unset for single-stage + operations. + status (google.container_v1.types.Operation.Status): + Status of an operation stage. + Unset for single-stage operations. + metrics (Sequence[google.container_v1.types.OperationProgress.Metric]): + Progress metric bundle, for example: metrics: [{name: "nodes + done", int_value: 15}, {name: "nodes total", int_value: 32}] + or metrics: [{name: "progress", double_value: 0.56}, {name: + "progress scale", double_value: 1.0}] + stages (Sequence[google.container_v1.types.OperationProgress]): + Substages of an operation or a stage. + """ + + class Metric(proto.Message): + r"""Progress metric is (string, int|float|string) pair. + Attributes: + name (str): + Required. Metric name, e.g., "nodes total", + "percent done". + int_value (int): + For metrics with integer value. + double_value (float): + For metrics with floating point value. 
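Mutating RPCs on this service hand back an ``Operation`` to poll rather than a long-running-operation handle. A minimal polling sketch built on the ``GetOperationRequest`` message defined further down in this module (the interval is arbitrary):

    import time

    from google.container_v1 import ClusterManagerClient, types

    def wait_for_operation(client: ClusterManagerClient, name: str) -> types.Operation:
        # Poll until the server reports the terminal DONE status.
        while True:
            op = client.get_operation(
                request=types.GetOperationRequest(name=name)
            )
            if op.status == types.Operation.Status.DONE:
                return op
            time.sleep(5)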
+ string_value (str): + For metrics with custom values (ratios, + visual progress, etc.). + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + int_value = proto.Field( + proto.INT64, + number=2, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=3, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=4, + oneof='value', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + status = proto.Field( + proto.ENUM, + number=2, + enum='Operation.Status', + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Metric, + ) + stages = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='OperationProgress', + ) + + +class CreateClusterRequest(proto.Message): + r"""CreateClusterRequest creates a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster (google.container_v1.types.Cluster): + Required. A `cluster + resource `__ + parent (str): + The parent (project and location) where the cluster will be + created. Specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster = proto.Field( + proto.MESSAGE, + number=3, + message='Cluster', + ) + parent = proto.Field( + proto.STRING, + number=5, + ) + + +class GetClusterRequest(proto.Message): + r"""GetClusterRequest gets the settings of a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + retrieve. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class UpdateClusterRequest(proto.Message): + r"""UpdateClusterRequest updates the settings of a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + update (google.container_v1.types.ClusterUpdate): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. 
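+
+    A minimal, illustrative usage sketch (not part of the generated
+    API surface; the project, location, and cluster names below are
+    hypothetical, and application-default credentials are assumed)::
+
+        from google.container_v1 import ClusterManagerClient
+        from google.container_v1.types import ClusterUpdate, UpdateClusterRequest
+
+        client = ClusterManagerClient()
+        request = UpdateClusterRequest(
+            name="projects/my-project/locations/us-central1/clusters/my-cluster",
+            # "latest" is a version alias; see ClusterUpdate above.
+            update=ClusterUpdate(desired_master_version="latest"),
+        )
+        operation = client.update_cluster(request=request)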
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + update = proto.Field( + proto.MESSAGE, + number=4, + message='ClusterUpdate', + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class UpdateNodePoolRequest(proto.Message): + r"""UpdateNodePoolRequests update a node pool's image and/or + version. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + upgrade. This field has been deprecated and + replaced by the name field. + node_version (str): + Required. The Kubernetes version to change + the nodes to (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + image_type (str): + Required. The desired image type for the node + pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to update. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + locations (Sequence[str]): + The desired list of Google Compute Engine + `zones `__ + in which the node pool's nodes should be located. Changing + the locations for a node pool will result in nodes being + either created or removed from the node pool, depending on + whether locations are being added or removed. + workload_metadata_config (google.container_v1.types.WorkloadMetadataConfig): + The desired workload metadata config for the + node pool. + upgrade_settings (google.container_v1.types.NodePool.UpgradeSettings): + Upgrade settings control disruption and speed + of the upgrade. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + node_version = proto.Field( + proto.STRING, + number=5, + ) + image_type = proto.Field( + proto.STRING, + number=6, + ) + name = proto.Field( + proto.STRING, + number=8, + ) + locations = proto.RepeatedField( + proto.STRING, + number=13, + ) + workload_metadata_config = proto.Field( + proto.MESSAGE, + number=14, + message='WorkloadMetadataConfig', + ) + upgrade_settings = proto.Field( + proto.MESSAGE, + number=15, + message='NodePool.UpgradeSettings', + ) + + +class SetNodePoolAutoscalingRequest(proto.Message): + r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of + a node pool. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. 
The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + upgrade. This field has been deprecated and + replaced by the name field. + autoscaling (google.container_v1.types.NodePoolAutoscaling): + Required. Autoscaling configuration for the + node pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to set autoscaler settings. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + autoscaling = proto.Field( + proto.MESSAGE, + number=5, + message='NodePoolAutoscaling', + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetLoggingServiceRequest(proto.Message): + r"""SetLoggingServiceRequest sets the logging service of a + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + logging_service (str): + Required. The logging service the cluster should use to + write logs. Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud Logging + service with a Kubernetes-native resource model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be used + for GKE 1.14+ or ``logging.googleapis.com`` for earlier + versions. + name (str): + The name (project, location, cluster) of the cluster to set + logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + logging_service = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class SetMonitoringServiceRequest(proto.Message): + r"""SetMonitoringServiceRequest sets the monitoring service of a + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + monitoring_service (str): + Required. The monitoring service the cluster should use to + write metrics. 
Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE 1.15). + - ``none`` - No metrics will be exported from the cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``monitoring.googleapis.com`` for + earlier versions. + name (str): + The name (project, location, cluster) of the cluster to set + monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + monitoring_service = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetAddonsConfigRequest(proto.Message): + r"""SetAddonsConfigRequest sets the addons associated with the + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + addons_config (google.container_v1.types.AddonsConfig): + Required. The desired configurations for the + various addons available to run in the cluster. + name (str): + The name (project, location, cluster) of the cluster to set + addons. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + addons_config = proto.Field( + proto.MESSAGE, + number=4, + message='AddonsConfig', + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetLocationsRequest(proto.Message): + r"""SetLocationsRequest sets the locations of the cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + locations (Sequence[str]): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing the + locations a cluster is in will result in nodes being either + created or removed from the cluster, depending on whether + locations are being added or removed. + + This list must always include the cluster's primary zone. + name (str): + The name (project, location, cluster) of the cluster to set + locations. Specified in the format + ``projects/*/locations/*/clusters/*``. 
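+
+    A minimal, illustrative sketch (the resource names and zones below
+    are hypothetical; application-default credentials are assumed)::
+
+        from google.container_v1 import ClusterManagerClient
+        from google.container_v1.types import SetLocationsRequest
+
+        client = ClusterManagerClient()
+        operation = client.set_locations(request=SetLocationsRequest(
+            name="projects/my-project/locations/us-central1/clusters/my-cluster",
+            # The list must include the cluster's primary zone.
+            locations=["us-central1-a", "us-central1-b"],
+        ))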
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + locations = proto.RepeatedField( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class UpdateMasterRequest(proto.Message): + r"""UpdateMasterRequest updates the master of the cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + master_version (str): + Required. The Kubernetes version to change + the master to. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + master_version = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class SetMasterAuthRequest(proto.Message): + r"""SetMasterAuthRequest updates the admin password of a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + upgrade. This field has been deprecated and + replaced by the name field. + action (google.container_v1.types.SetMasterAuthRequest.Action): + Required. The exact form of action to be + taken on the master auth. + update (google.container_v1.types.MasterAuth): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to set + auth. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + class Action(proto.Enum): + r"""Operation type: what type update to perform.""" + UNKNOWN = 0 + SET_PASSWORD = 1 + GENERATE_PASSWORD = 2 + SET_USERNAME = 3 + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + action = proto.Field( + proto.ENUM, + number=4, + enum=Action, + ) + update = proto.Field( + proto.MESSAGE, + number=5, + message='MasterAuth', + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class DeleteClusterRequest(proto.Message): + r"""DeleteClusterRequest deletes a cluster. + Attributes: + project_id (str): + Deprecated. 
+            The Google Developers Console `project ID or
+            project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Deprecated. The name of the cluster to
+            delete. This field has been deprecated and
+            replaced by the name field.
+        name (str):
+            The name (project, location, cluster) of the cluster to
+            delete. Specified in the format
+            ``projects/*/locations/*/clusters/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class ListClustersRequest(proto.Message):
+    r"""ListClustersRequest lists clusters.
+    Attributes:
+        project_id (str):
+            Deprecated. The Google Developers Console `project ID or
+            project
+            number `__.
+            This field has been deprecated and replaced by the parent
+            field.
+        zone (str):
+            Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides, or "-" for all zones. This
+            field has been deprecated and replaced by the parent field.
+        parent (str):
+            The parent (project and location) where the clusters will be
+            listed. Specified in the format ``projects/*/locations/*``.
+            Location "-" matches all zones and all regions.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    parent = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class ListClustersResponse(proto.Message):
+    r"""ListClustersResponse is the result of ListClustersRequest.
+    Attributes:
+        clusters (Sequence[google.container_v1.types.Cluster]):
+            A list of clusters in the project in the
+            specified zone, or across all zones.
+        missing_zones (Sequence[str]):
+            If any zones are listed here, the list of
+            clusters returned may be missing those zones.
+    """
+
+    clusters = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='Cluster',
+    )
+    missing_zones = proto.RepeatedField(
+        proto.STRING,
+        number=2,
+    )
+
+
+class GetOperationRequest(proto.Message):
+    r"""GetOperationRequest gets a single operation.
+    Attributes:
+        project_id (str):
+            Deprecated. The Google Developers Console `project ID or
+            project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        operation_id (str):
+            Deprecated. The server-assigned ``name`` of the operation.
+            This field has been deprecated and replaced by the name
+            field.
+        name (str):
+            The name (project, location, operation id) of the operation
+            to get. Specified in the format
+            ``projects/*/locations/*/operations/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    operation_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+
+
+class ListOperationsRequest(proto.Message):
+    r"""ListOperationsRequest lists operations.
+    Attributes:
+        project_id (str):
+            Deprecated. The Google Developers Console `project ID or
+            project
+            number `__.
+            This field has been deprecated and replaced by the parent
+            field.
+        zone (str):
+            Deprecated.
The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This field + has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the operations will + be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all zones + and all regions. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + parent = proto.Field( + proto.STRING, + number=4, + ) + + +class CancelOperationRequest(proto.Message): + r"""CancelOperationRequest cancels a single operation. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + operation_id (str): + Deprecated. The server-assigned ``name`` of the operation. + This field has been deprecated and replaced by the name + field. + name (str): + The name (project, location, operation id) of the operation + to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + operation_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=4, + ) + + +class ListOperationsResponse(proto.Message): + r"""ListOperationsResponse is the result of + ListOperationsRequest. + + Attributes: + operations (Sequence[google.container_v1.types.Operation]): + A list of operations in the project in the + specified zone. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + operations returned may be missing the + operations from those zones. + """ + + operations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Operation', + ) + missing_zones = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class GetServerConfigRequest(proto.Message): + r"""Gets the current Kubernetes Engine service configuration. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated and + replaced by the name field. + name (str): + The name (project and location) of the server config to get, + specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + name = proto.Field( + proto.STRING, + number=4, + ) + + +class ServerConfig(proto.Message): + r"""Kubernetes Engine service configuration. + Attributes: + default_cluster_version (str): + Version of Kubernetes the service deploys by + default. + valid_node_versions (Sequence[str]): + List of valid node upgrade target versions, + in descending order. + default_image_type (str): + Default image type. + valid_image_types (Sequence[str]): + List of valid image types. + valid_master_versions (Sequence[str]): + List of valid master versions, in descending + order. + channels (Sequence[google.container_v1.types.ServerConfig.ReleaseChannelConfig]): + List of release channel configurations. 
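+
+    An illustrative sketch of reading this message (the project and
+    location below are hypothetical; application-default credentials
+    are assumed)::
+
+        from google.container_v1 import ClusterManagerClient
+        from google.container_v1.types import GetServerConfigRequest
+
+        client = ClusterManagerClient()
+        config = client.get_server_config(request=GetServerConfigRequest(
+            name="projects/my-project/locations/us-central1",
+        ))
+        print(config.default_cluster_version)
+        for channel_config in config.channels:
+            print(channel_config.channel, channel_config.default_version)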
+ """ + + class ReleaseChannelConfig(proto.Message): + r"""ReleaseChannelConfig exposes configuration for a release + channel. + + Attributes: + channel (google.container_v1.types.ReleaseChannel.Channel): + The release channel this configuration + applies to. + default_version (str): + The default version for newly created + clusters on the channel. + valid_versions (Sequence[str]): + List of valid versions for the channel. + """ + + channel = proto.Field( + proto.ENUM, + number=1, + enum='ReleaseChannel.Channel', + ) + default_version = proto.Field( + proto.STRING, + number=2, + ) + valid_versions = proto.RepeatedField( + proto.STRING, + number=4, + ) + + default_cluster_version = proto.Field( + proto.STRING, + number=1, + ) + valid_node_versions = proto.RepeatedField( + proto.STRING, + number=3, + ) + default_image_type = proto.Field( + proto.STRING, + number=4, + ) + valid_image_types = proto.RepeatedField( + proto.STRING, + number=5, + ) + valid_master_versions = proto.RepeatedField( + proto.STRING, + number=6, + ) + channels = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=ReleaseChannelConfig, + ) + + +class CreateNodePoolRequest(proto.Message): + r"""CreateNodePoolRequest creates a node pool for a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the parent field. + node_pool (google.container_v1.types.NodePool): + Required. The node pool to create. + parent (str): + The parent (project, location, cluster id) where the node + pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool = proto.Field( + proto.MESSAGE, + number=4, + message='NodePool', + ) + parent = proto.Field( + proto.STRING, + number=6, + ) + + +class DeleteNodePoolRequest(proto.Message): + r"""DeleteNodePoolRequest deletes a node pool for a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + delete. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. 
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNodePoolsRequest(proto.Message): + r"""ListNodePoolsRequest lists the node pool(s) for a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the parent field. + parent (str): + The parent (project, location, cluster id) where the node + pools will be listed. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + parent = proto.Field( + proto.STRING, + number=5, + ) + + +class GetNodePoolRequest(proto.Message): + r"""GetNodePoolRequest retrieves a node pool for a cluster. + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + node_pool_id (str): + Deprecated. The name of the node pool. + This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to get. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class NodePool(proto.Message): + r"""NodePool contains the name and configuration for a cluster's + node pool. Node pools are a set of nodes (i.e. VM's), with a + common configuration and specification, under the control of the + cluster master. They may have a set of Kubernetes labels applied + to them, which may be used to reference them during pod + scheduling. They may also be resized up or down, to accommodate + the workload. + + Attributes: + name (str): + The name of the node pool. + config (google.container_v1.types.NodeConfig): + The node configuration of the pool. + initial_node_count (int): + The initial node count for the pool. You must ensure that + your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. + locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the NodePool's nodes should be located. 
+ + If this value is unspecified during node pool creation, the + `Cluster.Locations `__ + value will be used, instead. + + Warning: changing node pool locations will result in nodes + being added and/or removed. + self_link (str): + [Output only] Server-defined URL for the resource. + version (str): + The version of the Kubernetes of this node. + instance_group_urls (Sequence[str]): + [Output only] The resource URLs of the `managed instance + groups `__ + associated with this node pool. + status (google.container_v1.types.NodePool.Status): + [Output only] The status of the nodes in this pool instance. + status_message (str): + [Output only] Deprecated. Use conditions instead. Additional + information about the current status of this node pool + instance, if available. + autoscaling (google.container_v1.types.NodePoolAutoscaling): + Autoscaler configuration for this NodePool. + Autoscaler is enabled only if a valid + configuration is present. + management (google.container_v1.types.NodeManagement): + NodeManagement configuration for this + NodePool. + max_pods_constraint (google.container_v1.types.MaxPodsConstraint): + The constraint on the maximum number of pods + that can be run simultaneously on a node in the + node pool. + conditions (Sequence[google.container_v1.types.StatusCondition]): + Which conditions caused the current node pool + state. + pod_ipv4_cidr_size (int): + [Output only] The pod CIDR block size per node in this node + pool. + upgrade_settings (google.container_v1.types.NodePool.UpgradeSettings): + Upgrade settings control disruption and speed + of the upgrade. + """ + class Status(proto.Enum): + r"""The current status of the node pool instance.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RUNNING_WITH_ERROR = 3 + RECONCILING = 4 + STOPPING = 5 + ERROR = 6 + + class UpgradeSettings(proto.Message): + r"""These upgrade settings control the level of parallelism and + the level of disruption caused by an upgrade. + + maxUnavailable controls the number of nodes that can be + simultaneously unavailable. + + maxSurge controls the number of additional nodes that can be + added to the node pool temporarily for the time of the upgrade + to increase the number of available nodes. + + (maxUnavailable + maxSurge) determines the level of parallelism + (how many nodes are being upgraded at the same time). + + Note: upgrades inevitably introduce some disruption since + workloads need to be moved from old nodes to new, upgraded ones. + Even if maxUnavailable=0, this holds true. (Disruption stays + within the limits of PodDisruptionBudget, if it is configured.) + + Consider a hypothetical node pool with 5 nodes having + maxSurge=2, maxUnavailable=1. This means the upgrade process + upgrades 3 nodes simultaneously. It creates 2 additional + (upgraded) nodes, then it brings down 3 old (not yet upgraded) + nodes at the same time. This ensures that there are always at + least 4 nodes available. + + Attributes: + max_surge (int): + The maximum number of nodes that can be + created beyond the current size of the node pool + during the upgrade process. + max_unavailable (int): + The maximum number of nodes that can be + simultaneously unavailable during the upgrade + process. A node is considered available if its + status is Ready. 
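+
+        An illustrative construction of the worked example above
+        (maxSurge=2, maxUnavailable=1)::
+
+            from google.container_v1.types import NodePool
+
+            # Up to 2 extra nodes may be created and up to 1 node may be
+            # unavailable at a time, so a 5-node pool keeps at least 4
+            # nodes available throughout the upgrade.
+            settings = NodePool.UpgradeSettings(max_surge=2, max_unavailable=1)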
+ """ + + max_surge = proto.Field( + proto.INT32, + number=1, + ) + max_unavailable = proto.Field( + proto.INT32, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + config = proto.Field( + proto.MESSAGE, + number=2, + message='NodeConfig', + ) + initial_node_count = proto.Field( + proto.INT32, + number=3, + ) + locations = proto.RepeatedField( + proto.STRING, + number=13, + ) + self_link = proto.Field( + proto.STRING, + number=100, + ) + version = proto.Field( + proto.STRING, + number=101, + ) + instance_group_urls = proto.RepeatedField( + proto.STRING, + number=102, + ) + status = proto.Field( + proto.ENUM, + number=103, + enum=Status, + ) + status_message = proto.Field( + proto.STRING, + number=104, + ) + autoscaling = proto.Field( + proto.MESSAGE, + number=4, + message='NodePoolAutoscaling', + ) + management = proto.Field( + proto.MESSAGE, + number=5, + message='NodeManagement', + ) + max_pods_constraint = proto.Field( + proto.MESSAGE, + number=6, + message='MaxPodsConstraint', + ) + conditions = proto.RepeatedField( + proto.MESSAGE, + number=105, + message='StatusCondition', + ) + pod_ipv4_cidr_size = proto.Field( + proto.INT32, + number=7, + ) + upgrade_settings = proto.Field( + proto.MESSAGE, + number=107, + message=UpgradeSettings, + ) + + +class NodeManagement(proto.Message): + r"""NodeManagement defines the set of node management services + turned on for the node pool. + + Attributes: + auto_upgrade (bool): + A flag that specifies whether node auto- + pgrade is enabled for the node pool. If enabled, + node auto-upgrade helps keep the nodes in your + node pool up to date with the latest release + version of Kubernetes. + auto_repair (bool): + A flag that specifies whether the node auto- + epair is enabled for the node pool. If enabled, + the nodes in this node pool will be monitored + and, if they fail health checks too many times, + an automatic repair action will be triggered. + upgrade_options (google.container_v1.types.AutoUpgradeOptions): + Specifies the Auto Upgrade knobs for the node + pool. + """ + + auto_upgrade = proto.Field( + proto.BOOL, + number=1, + ) + auto_repair = proto.Field( + proto.BOOL, + number=2, + ) + upgrade_options = proto.Field( + proto.MESSAGE, + number=10, + message='AutoUpgradeOptions', + ) + + +class AutoUpgradeOptions(proto.Message): + r"""AutoUpgradeOptions defines the set of options for the user to + control how the Auto Upgrades will proceed. + + Attributes: + auto_upgrade_start_time (str): + [Output only] This field is set when upgrades are about to + commence with the approximate start time for the upgrades, + in `RFC3339 `__ text + format. + description (str): + [Output only] This field is set when upgrades are about to + commence with the description of the upgrade. + """ + + auto_upgrade_start_time = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + + +class MaintenancePolicy(proto.Message): + r"""MaintenancePolicy defines the maintenance policy to be used + for the cluster. + + Attributes: + window (google.container_v1.types.MaintenanceWindow): + Specifies the maintenance window in which + maintenance may be performed. + resource_version (str): + A hash identifying the version of this policy, so that + updates to fields of the policy won't accidentally undo + intermediate changes (and so that users of the API unaware + of some fields won't accidentally remove other fields). 
Make + a ``get()`` request to the cluster to get the current + resource version and include it with requests to set the + policy. + """ + + window = proto.Field( + proto.MESSAGE, + number=1, + message='MaintenanceWindow', + ) + resource_version = proto.Field( + proto.STRING, + number=3, + ) + + +class MaintenanceWindow(proto.Message): + r"""MaintenanceWindow defines the maintenance window to be used + for the cluster. + + Attributes: + daily_maintenance_window (google.container_v1.types.DailyMaintenanceWindow): + DailyMaintenanceWindow specifies a daily + maintenance operation window. + recurring_window (google.container_v1.types.RecurringTimeWindow): + RecurringWindow specifies some number of + recurring time periods for maintenance to occur. + The time windows may be overlapping. If no + maintenance windows are set, maintenance can + occur at any time. + maintenance_exclusions (Sequence[google.container_v1.types.MaintenanceWindow.MaintenanceExclusionsEntry]): + Exceptions to maintenance window. Non- + mergency maintenance should not occur in these + windows. + """ + + daily_maintenance_window = proto.Field( + proto.MESSAGE, + number=2, + oneof='policy', + message='DailyMaintenanceWindow', + ) + recurring_window = proto.Field( + proto.MESSAGE, + number=3, + oneof='policy', + message='RecurringTimeWindow', + ) + maintenance_exclusions = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=4, + message='TimeWindow', + ) + + +class TimeWindow(proto.Message): + r"""Represents an arbitrary window of time. + Attributes: + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time that the window first starts. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The time that the window ends. The end time + should take place after the start time. + """ + + start_time = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + end_time = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + + +class RecurringTimeWindow(proto.Message): + r"""Represents an arbitrary window of time that recurs. + Attributes: + window (google.container_v1.types.TimeWindow): + The window of the first recurrence. + recurrence (str): + An RRULE + (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for + how this window reccurs. They go on for the span of time + between the start and end time. + + For example, to have something repeat every weekday, you'd + use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR`` + + To repeat some window daily (equivalent to the + DailyMaintenanceWindow): ``FREQ=DAILY`` + + For the first weekend of every month: + ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU`` + + This specifies how frequently the window starts. Eg, if you + wanted to have a 9-5 UTC-4 window every weekday, you'd use + something like: + + :: + + start time = 2019-01-01T09:00:00-0400 + end time = 2019-01-01T17:00:00-0400 + recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR + + Windows can span multiple days. Eg, to make the window + encompass every weekend from midnight Saturday till the last + minute of Sunday UTC: + + :: + + start time = 2019-01-05T00:00:00Z + end time = 2019-01-07T23:59:00Z + recurrence = FREQ=WEEKLY;BYDAY=SA + + Note the start and end time's specific dates are largely + arbitrary except to specify duration of the window and when + it first starts. The FREQ values of HOURLY, MINUTELY, and + SECONDLY are not supported. 
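+
+    An illustrative construction of the weekday example above (the
+    epoch seconds below encode 2019-01-01T09:00:00-04:00 and
+    2019-01-01T17:00:00-04:00)::
+
+        from google.protobuf import timestamp_pb2
+        from google.container_v1.types import RecurringTimeWindow, TimeWindow
+
+        window = RecurringTimeWindow(
+            window=TimeWindow(
+                start_time=timestamp_pb2.Timestamp(seconds=1546347600),
+                end_time=timestamp_pb2.Timestamp(seconds=1546376400),
+            ),
+            recurrence="FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR",
+        )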
+ """ + + window = proto.Field( + proto.MESSAGE, + number=1, + message='TimeWindow', + ) + recurrence = proto.Field( + proto.STRING, + number=2, + ) + + +class DailyMaintenanceWindow(proto.Message): + r"""Time window specified for daily maintenance operations. + Attributes: + start_time (str): + Time within the maintenance window to start the maintenance + operations. Time format should be in + `RFC3339 `__ format + "HH:MM", where HH : [00-23] and MM : [00-59] GMT. + duration (str): + [Output only] Duration of the time window, automatically + chosen to be smallest possible in the given scenario. + Duration will be in + `RFC3339 `__ format + "PTnHnMnS". + """ + + start_time = proto.Field( + proto.STRING, + number=2, + ) + duration = proto.Field( + proto.STRING, + number=3, + ) + + +class SetNodePoolManagementRequest(proto.Message): + r"""SetNodePoolManagementRequest sets the node management + properties of a node pool. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + update. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + update. This field has been deprecated and + replaced by the name field. + management (google.container_v1.types.NodeManagement): + Required. NodeManagement configuration for + the node pool. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to set management properties. Specified in the + format ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + management = proto.Field( + proto.MESSAGE, + number=5, + message='NodeManagement', + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class SetNodePoolSizeRequest(proto.Message): + r"""SetNodePoolSizeRequest sets the size a node + pool. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + update. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + update. This field has been deprecated and + replaced by the name field. + node_count (int): + Required. The desired node count for the + pool. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to set size. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. 
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + node_count = proto.Field( + proto.INT32, + number=5, + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class RollbackNodePoolUpgradeRequest(proto.Message): + r"""RollbackNodePoolUpgradeRequest rollbacks the previously + Aborted or Failed NodePool upgrade. This will be an no-op if the + last upgrade successfully completed. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster to + rollback. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Deprecated. The name of the node pool to + rollback. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node poll to rollback upgrade. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNodePoolsResponse(proto.Message): + r"""ListNodePoolsResponse is the result of ListNodePoolsRequest. + Attributes: + node_pools (Sequence[google.container_v1.types.NodePool]): + A list of node pools for a cluster. + """ + + node_pools = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='NodePool', + ) + + +class ClusterAutoscaling(proto.Message): + r"""ClusterAutoscaling contains global, per-cluster information + required by Cluster Autoscaler to automatically adjust the size + of the cluster and create/delete + node pools based on the current needs. + + Attributes: + enable_node_autoprovisioning (bool): + Enables automatic node pool creation and + deletion. + resource_limits (Sequence[google.container_v1.types.ResourceLimit]): + Contains global constraints regarding minimum + and maximum amount of resources in the cluster. + autoprovisioning_node_pool_defaults (google.container_v1.types.AutoprovisioningNodePoolDefaults): + AutoprovisioningNodePoolDefaults contains + defaults for a node pool created by NAP. + autoprovisioning_locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the NodePool's nodes can be created by NAP. + """ + + enable_node_autoprovisioning = proto.Field( + proto.BOOL, + number=1, + ) + resource_limits = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='ResourceLimit', + ) + autoprovisioning_node_pool_defaults = proto.Field( + proto.MESSAGE, + number=4, + message='AutoprovisioningNodePoolDefaults', + ) + autoprovisioning_locations = proto.RepeatedField( + proto.STRING, + number=5, + ) + + +class AutoprovisioningNodePoolDefaults(proto.Message): + r"""AutoprovisioningNodePoolDefaults contains defaults for a node + pool created by NAP. 
+
+    Attributes:
+        oauth_scopes (Sequence[str]):
+            Scopes that are used by NAP when creating
+            node pools.
+        service_account (str):
+            The Google Cloud Platform Service Account to
+            be used by the node VMs.
+        upgrade_settings (google.container_v1.types.NodePool.UpgradeSettings):
+            Specifies the upgrade settings for NAP
+            created node pools.
+        management (google.container_v1.types.NodeManagement):
+            Specifies the node management options for NAP
+            created node pools.
+        min_cpu_platform (str):
+            Minimum CPU platform to be used for NAP created node pools.
+            The instance may be scheduled on the specified or newer CPU
+            platform. Applicable values are the friendly names of CPU
+            platforms, such as minCpuPlatform: Intel Haswell or
+            minCpuPlatform: Intel Sandy Bridge. For more information,
+            read `how to specify min CPU
+            platform `__
+            To unset the min cpu platform field pass "automatic" as
+            field value.
+        disk_size_gb (int):
+            Size of the disk attached to each node,
+            specified in GB. The smallest allowed disk size
+            is 10GB.
+            If unspecified, the default disk size is 100GB.
+        disk_type (str):
+            Type of the disk attached to each node (e.g.
+            'pd-standard', 'pd-ssd' or 'pd-balanced')
+
+            If unspecified, the default disk type is
+            'pd-standard'.
+        shielded_instance_config (google.container_v1.types.ShieldedInstanceConfig):
+            Shielded Instance options.
+        boot_disk_kms_key (str):
+            The Customer Managed Encryption Key used to encrypt the boot
+            disk attached to each node in the node pool. This should be
+            of the form
+            projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME].
+            For more information about protecting resources with Cloud
+            KMS Keys please see:
+            https://cloud.google.com/compute/docs/disks/customer-managed-encryption
+        image_type (str):
+            The image type to use for NAP created nodes.
+    """
+
+    oauth_scopes = proto.RepeatedField(
+        proto.STRING,
+        number=1,
+    )
+    service_account = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    upgrade_settings = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message='NodePool.UpgradeSettings',
+    )
+    management = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message='NodeManagement',
+    )
+    min_cpu_platform = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    disk_size_gb = proto.Field(
+        proto.INT32,
+        number=6,
+    )
+    disk_type = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+    shielded_instance_config = proto.Field(
+        proto.MESSAGE,
+        number=8,
+        message='ShieldedInstanceConfig',
+    )
+    boot_disk_kms_key = proto.Field(
+        proto.STRING,
+        number=9,
+    )
+    image_type = proto.Field(
+        proto.STRING,
+        number=10,
+    )
+
+
+class ResourceLimit(proto.Message):
+    r"""Contains information about amount of some resource in the
+    cluster. For memory, value should be in GB.
+
+    Attributes:
+        resource_type (str):
+            Resource name "cpu", "memory" or gpu-specific
+            string.
+        minimum (int):
+            Minimum amount of the resource in the
+            cluster.
+        maximum (int):
+            Maximum amount of the resource in the
+            cluster.
+    """
+
+    resource_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    minimum = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    maximum = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+
+
+class NodePoolAutoscaling(proto.Message):
+    r"""NodePoolAutoscaling contains information required by cluster
+    autoscaler to adjust the size of the node pool to the current
+    cluster usage.
+
+    Attributes:
+        enabled (bool):
+            Is autoscaling enabled for this node pool.
+        min_node_count (int):
+            Minimum number of nodes in the NodePool. Must be >= 1 and <=
+            max_node_count.
+        max_node_count (int):
+            Maximum number of nodes in the NodePool. Must be >=
+            min_node_count. There has to be enough quota to scale up the
+            cluster.
+        autoprovisioned (bool):
+            Can this node pool be deleted automatically.
+    """
+
+    enabled = proto.Field(
+        proto.BOOL,
+        number=1,
+    )
+    min_node_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    max_node_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    autoprovisioned = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+
+
+class SetLabelsRequest(proto.Message):
+    r"""SetLabelsRequest sets the Google Cloud Platform labels on a
+    Google Container Engine cluster, which will in turn set them for
+    Google Compute Engine resources used by that cluster.
+
+    Attributes:
+        project_id (str):
+            Deprecated. The Google Developers Console `project ID or
+            project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Deprecated. The name of the cluster.
+            This field has been deprecated and replaced by
+            the name field.
+        resource_labels (Sequence[google.container_v1.types.SetLabelsRequest.ResourceLabelsEntry]):
+            Required. The labels to set for that cluster.
+        label_fingerprint (str):
+            Required. The fingerprint of the previous set of labels for
+            this resource, used to detect conflicts. The fingerprint is
+            initially generated by Kubernetes Engine and changes after
+            every request to modify or update labels. You must always
+            provide an up-to-date fingerprint hash when updating or
+            changing labels. Make a ``get()`` request to the resource to
+            get the latest fingerprint.
+        name (str):
+            The name (project, location, cluster id) of the cluster to
+            set labels. Specified in the format
+            ``projects/*/locations/*/clusters/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    resource_labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=4,
+    )
+    label_fingerprint = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+class SetLegacyAbacRequest(proto.Message):
+    r"""SetLegacyAbacRequest enables or disables the ABAC
+    authorization mechanism for a cluster.
+
+    Attributes:
+        project_id (str):
+            Deprecated. The Google Developers Console `project ID or
+            project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Deprecated. The name of the cluster to
+            update. This field has been deprecated and
+            replaced by the name field.
+        enabled (bool):
+            Required. Whether ABAC authorization will be
+            enabled in the cluster.
+        name (str):
+            The name (project, location, cluster id) of the cluster to
+            set legacy abac. Specified in the format
+            ``projects/*/locations/*/clusters/*``.
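+
+    A minimal, illustrative sketch (the resource names below are
+    hypothetical; application-default credentials are assumed)::
+
+        from google.container_v1 import ClusterManagerClient
+        from google.container_v1.types import SetLegacyAbacRequest
+
+        client = ClusterManagerClient()
+        operation = client.set_legacy_abac(request=SetLegacyAbacRequest(
+            name="projects/my-project/locations/us-central1/clusters/my-cluster",
+            enabled=False,
+        ))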
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + enabled = proto.Field( + proto.BOOL, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class StartIPRotationRequest(proto.Message): + r"""StartIPRotationRequest creates a new IP for the cluster and + then performs a node upgrade on each node pool to point to the + new IP. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, cluster id) of the cluster to + start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + rotate_credentials (bool): + Whether to rotate credentials during IP + rotation. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + rotate_credentials = proto.Field( + proto.BOOL, + number=7, + ) + + +class CompleteIPRotationRequest(proto.Message): + r"""CompleteIPRotationRequest moves the cluster master back into + single-IP mode. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, cluster id) of the cluster to + complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class AcceleratorConfig(proto.Message): + r"""AcceleratorConfig represents a Hardware Accelerator request. + Attributes: + accelerator_count (int): + The number of the accelerator cards exposed + to an instance. + accelerator_type (str): + The accelerator type resource name. List of supported + accelerators + `here `__ + """ + + accelerator_count = proto.Field( + proto.INT64, + number=1, + ) + accelerator_type = proto.Field( + proto.STRING, + number=2, + ) + + +class WorkloadMetadataConfig(proto.Message): + r"""WorkloadMetadataConfig defines the metadata configuration to + expose to workloads on the node pool. + + Attributes: + mode (google.container_v1.types.WorkloadMetadataConfig.Mode): + Mode is the configuration for how to expose + metadata to workloads running on the node pool. + """ + class Mode(proto.Enum): + r"""Mode is the configuration for how to expose metadata to + workloads running on the node. 
+ """ + MODE_UNSPECIFIED = 0 + GCE_METADATA = 1 + GKE_METADATA = 2 + + mode = proto.Field( + proto.ENUM, + number=2, + enum=Mode, + ) + + +class SetNetworkPolicyRequest(proto.Message): + r"""SetNetworkPolicyRequest enables/disables network policy for a + cluster. + + Attributes: + project_id (str): + Deprecated. The Google Developers Console `project ID or + project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Deprecated. The name of the cluster. + This field has been deprecated and replaced by + the name field. + network_policy (google.container_v1.types.NetworkPolicy): + Required. Configuration options for the + NetworkPolicy feature. + name (str): + The name (project, location, cluster id) of the cluster to + set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + network_policy = proto.Field( + proto.MESSAGE, + number=4, + message='NetworkPolicy', + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetMaintenancePolicyRequest(proto.Message): + r"""SetMaintenancePolicyRequest sets the maintenance policy for a + cluster. + + Attributes: + project_id (str): + Required. The Google Developers Console `project ID or + project + number `__. + zone (str): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + cluster_id (str): + Required. The name of the cluster to update. + maintenance_policy (google.container_v1.types.MaintenancePolicy): + Required. The maintenance policy to be set + for the cluster. An empty field clears the + existing maintenance policy. + name (str): + The name (project, location, cluster id) of the cluster to + set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + maintenance_policy = proto.Field( + proto.MESSAGE, + number=4, + message='MaintenancePolicy', + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class StatusCondition(proto.Message): + r"""StatusCondition describes why a cluster or a node pool has a + certain status (e.g., ERROR or DEGRADED). + + Attributes: + code (google.container_v1.types.StatusCondition.Code): + Machine-friendly representation of the + condition + message (str): + Human-friendly representation of the + condition + """ + class Code(proto.Enum): + r"""Code for each condition""" + UNKNOWN = 0 + GCE_STOCKOUT = 1 + GKE_SERVICE_ACCOUNT_DELETED = 2 + GCE_QUOTA_EXCEEDED = 3 + SET_BY_OPERATOR = 4 + CLOUD_KMS_KEY_ERROR = 7 + + code = proto.Field( + proto.ENUM, + number=1, + enum=Code, + ) + message = proto.Field( + proto.STRING, + number=2, + ) + + +class NetworkConfig(proto.Message): + r"""NetworkConfig reports the relative names of network & + subnetwork. + + Attributes: + network (str): + Output only. The relative name of the Google Compute Engine + [network]`google.container.v1.NetworkConfig.network `__ + to which the cluster is connected. 
Example: + projects/my-project/global/networks/my-network + subnetwork (str): + Output only. The relative name of the Google Compute Engine + `subnetwork `__ + to which the cluster is connected. Example: + projects/my-project/regions/us-central1/subnetworks/my-subnet + enable_intra_node_visibility (bool): + Whether Intra-node visibility is enabled for + this cluster. This makes same-node pod-to-pod + traffic visible to the VPC network. + default_snat_status (google.container_v1.types.DefaultSnatStatus): + Whether the cluster disables default in-node sNAT rules. + In-node sNAT rules will be disabled when default_snat_status + is disabled. When disabled is set to false, default IP + masquerade rules will be applied to the nodes to prevent + sNAT on cluster internal traffic. + """ + + network = proto.Field( + proto.STRING, + number=1, + ) + subnetwork = proto.Field( + proto.STRING, + number=2, + ) + enable_intra_node_visibility = proto.Field( + proto.BOOL, + number=5, + ) + default_snat_status = proto.Field( + proto.MESSAGE, + number=7, + message='DefaultSnatStatus', + ) + + +class GetOpenIDConfigRequest(proto.Message): + r"""GetOpenIDConfigRequest gets the OIDC discovery document for + the cluster. See the OpenID Connect Discovery 1.0 specification + for details. + + Attributes: + parent (str): + The cluster (project, location, cluster id) to get the + discovery document for. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + + +class GetOpenIDConfigResponse(proto.Message): + r"""GetOpenIDConfigResponse is an OIDC discovery document for the + cluster. See the OpenID Connect Discovery 1.0 specification for + details. + + Attributes: + issuer (str): + OIDC Issuer. + jwks_uri (str): + JSON Web Key URI. + response_types_supported (Sequence[str]): + Supported response types. + subject_types_supported (Sequence[str]): + Supported subject types. + id_token_signing_alg_values_supported (Sequence[str]): + Supported ID token signing algorithms. + claims_supported (Sequence[str]): + Supported claims. + grant_types (Sequence[str]): + Supported grant types. + """ + + issuer = proto.Field( + proto.STRING, + number=1, + ) + jwks_uri = proto.Field( + proto.STRING, + number=2, + ) + response_types_supported = proto.RepeatedField( + proto.STRING, + number=3, + ) + subject_types_supported = proto.RepeatedField( + proto.STRING, + number=4, + ) + id_token_signing_alg_values_supported = proto.RepeatedField( + proto.STRING, + number=5, + ) + claims_supported = proto.RepeatedField( + proto.STRING, + number=6, + ) + grant_types = proto.RepeatedField( + proto.STRING, + number=7, + ) + + +class GetJSONWebKeysRequest(proto.Message): + r"""GetJSONWebKeysRequest gets the public component of the keys used by + the cluster to sign token requests. This will be the jwks_uri for + the discovery document returned by getOpenIDConfig. See the OpenID + Connect Discovery 1.0 specification for details. + + Attributes: + parent (str): + The cluster (project, location, cluster id) to get keys for. + Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + + +class Jwk(proto.Message): + r"""Jwk is a JSON Web Key as specified in RFC 7517 + Attributes: + kty (str): + Key Type. + alg (str): + Algorithm. + use (str): + Permitted uses for the public keys. + kid (str): + Key ID. + n (str): + Used for RSA keys. + e (str): + Used for RSA keys. + x (str): + Used for ECDSA keys.
+ y (str): + Used for ECDSA keys. + crv (str): + Used for ECDSA keys. + """ + + kty = proto.Field( + proto.STRING, + number=1, + ) + alg = proto.Field( + proto.STRING, + number=2, + ) + use = proto.Field( + proto.STRING, + number=3, + ) + kid = proto.Field( + proto.STRING, + number=4, + ) + n = proto.Field( + proto.STRING, + number=5, + ) + e = proto.Field( + proto.STRING, + number=6, + ) + x = proto.Field( + proto.STRING, + number=7, + ) + y = proto.Field( + proto.STRING, + number=8, + ) + crv = proto.Field( + proto.STRING, + number=9, + ) + + +class GetJSONWebKeysResponse(proto.Message): + r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as + specified in RFC 7517 + + Attributes: + keys (Sequence[google.container_v1.types.Jwk]): + The public component of the keys used by the + cluster to sign token requests. + """ + + keys = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Jwk', + ) + + +class ReleaseChannel(proto.Message): + r"""ReleaseChannel indicates which release channel a cluster is + subscribed to. Release channels are arranged in order of risk. + When a cluster is subscribed to a release channel, Google + maintains both the master version and the node version. Node + auto-upgrade defaults to true and cannot be disabled. + + Attributes: + channel (google.container_v1.types.ReleaseChannel.Channel): + channel specifies which release channel the + cluster is subscribed to. + """ + class Channel(proto.Enum): + r"""Possible values for 'channel'.""" + UNSPECIFIED = 0 + RAPID = 1 + REGULAR = 2 + STABLE = 3 + + channel = proto.Field( + proto.ENUM, + number=1, + enum=Channel, + ) + + +class IntraNodeVisibilityConfig(proto.Message): + r"""IntraNodeVisibilityConfig contains the desired config of the + intra-node visibility on this cluster. + + Attributes: + enabled (bool): + Enables intra-node visibility for this + cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class MaxPodsConstraint(proto.Message): + r"""Constraints applied to pods. + Attributes: + max_pods_per_node (int): + Constraint enforced on the maximum number of + pods per node. + """ + + max_pods_per_node = proto.Field( + proto.INT64, + number=1, + ) + + +class WorkloadIdentityConfig(proto.Message): + r"""Configuration for the use of Kubernetes Service Accounts in + GCP IAM policies. + + Attributes: + workload_pool (str): + The workload pool to attach all Kubernetes + service accounts to. + """ + + workload_pool = proto.Field( + proto.STRING, + number=2, + ) + + +class DatabaseEncryption(proto.Message): + r"""Configuration of etcd encryption. + Attributes: + state (google.container_v1.types.DatabaseEncryption.State): + Denotes the state of etcd encryption. + key_name (str): + Name of CloudKMS key to use for the + encryption of secrets in etcd. Ex. + projects/my-project/locations/global/keyRings/my-ring/cryptoKeys/my-key + """ + class State(proto.Enum): + r"""State of etcd encryption.""" + UNKNOWN = 0 + ENCRYPTED = 1 + DECRYPTED = 2 + + state = proto.Field( + proto.ENUM, + number=2, + enum=State, + ) + key_name = proto.Field( + proto.STRING, + number=1, + ) + + +class ListUsableSubnetworksRequest(proto.Message): + r"""ListUsableSubnetworksRequest requests the list of usable + subnetworks available to a user for creating clusters. + + Attributes: + parent (str): + The parent project where subnetworks are usable. Specified + in the format ``projects/*``.
+ filter (str): + Filtering currently only supports equality on the + networkProjectId and must be in the form: + "networkProjectId=[PROJECTID]", where ``networkProjectId`` + is the project which owns the listed subnetworks. This + defaults to the parent project ID. + page_size (int): + The max number of results per page that should be returned. + If the number of available results is larger than + ``page_size``, a ``next_page_token`` is returned which can + be used to get the next page of results in subsequent + requests. Acceptable values are 0 to 500, inclusive. + (Default: 500) + page_token (str): + Specifies a page token to use. Set this to + the nextPageToken returned by previous list + requests to get the next page of results. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + filter = proto.Field( + proto.STRING, + number=2, + ) + page_size = proto.Field( + proto.INT32, + number=3, + ) + page_token = proto.Field( + proto.STRING, + number=4, + ) + + +class ListUsableSubnetworksResponse(proto.Message): + r"""ListUsableSubnetworksResponse is the response of + ListUsableSubnetworksRequest. + + Attributes: + subnetworks (Sequence[google.container_v1.types.UsableSubnetwork]): + A list of usable subnetworks in the specified + network project. + next_page_token (str): + This token allows you to get the next page of results for + list requests. If the number of results is larger than + ``page_size``, use the ``next_page_token`` as a value for + the query parameter ``page_token`` in the next request. The + value will become empty when there are no more pages. + """ + + @property + def raw_page(self): + return self + + subnetworks = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='UsableSubnetwork', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UsableSubnetworkSecondaryRange(proto.Message): + r"""Secondary IP range of a usable subnetwork. + Attributes: + range_name (str): + The name associated with this subnetwork + secondary range, used when adding an alias IP + range to a VM instance. + ip_cidr_range (str): + The range of IP addresses belonging to this + subnetwork secondary range. + status (google.container_v1.types.UsableSubnetworkSecondaryRange.Status): + This field is to determine the status of the + secondary range programmatically. + """ + class Status(proto.Enum): + r"""Status shows the current usage of a secondary IP range.""" + UNKNOWN = 0 + UNUSED = 1 + IN_USE_SERVICE = 2 + IN_USE_SHAREABLE_POD = 3 + IN_USE_MANAGED_POD = 4 + + range_name = proto.Field( + proto.STRING, + number=1, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=2, + ) + status = proto.Field( + proto.ENUM, + number=3, + enum=Status, + ) + + +class UsableSubnetwork(proto.Message): + r"""UsableSubnetwork resource returns the subnetwork name, its + associated network and the primary CIDR range. + + Attributes: + subnetwork (str): + Subnetwork Name. + Example: + projects/my-project/regions/us-central1/subnetworks/my-subnet + network (str): + Network Name. + Example: + projects/my-project/global/networks/my-network + ip_cidr_range (str): + The range of internal addresses that are + owned by this subnetwork. + secondary_ip_ranges (Sequence[google.container_v1.types.UsableSubnetworkSecondaryRange]): + Secondary IP ranges. + status_message (str): + A human-readable status message representing the reasons for + cases where the caller cannot use the secondary ranges under + the subnet.
For example if the secondary_ip_ranges is empty + due to a permission issue, an insufficient permission + message will be given by status_message. + """ + + subnetwork = proto.Field( + proto.STRING, + number=1, + ) + network = proto.Field( + proto.STRING, + number=2, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=3, + ) + secondary_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='UsableSubnetworkSecondaryRange', + ) + status_message = proto.Field( + proto.STRING, + number=5, + ) + + +class ResourceUsageExportConfig(proto.Message): + r"""Configuration for exporting cluster resource usages. + Attributes: + bigquery_destination (google.container_v1.types.ResourceUsageExportConfig.BigQueryDestination): + Configuration to use BigQuery as usage export + destination. + enable_network_egress_metering (bool): + Whether to enable network egress metering for + this cluster. If enabled, a daemonset will be + created in the cluster to meter network egress + traffic. + consumption_metering_config (google.container_v1.types.ResourceUsageExportConfig.ConsumptionMeteringConfig): + Configuration to enable resource consumption + metering. + """ + + class BigQueryDestination(proto.Message): + r"""Parameters for using BigQuery as the destination of resource + usage export. + + Attributes: + dataset_id (str): + The ID of a BigQuery Dataset. + """ + + dataset_id = proto.Field( + proto.STRING, + number=1, + ) + + class ConsumptionMeteringConfig(proto.Message): + r"""Parameters for controlling consumption metering. + Attributes: + enabled (bool): + Whether to enable consumption metering for + this cluster. If enabled, a second BigQuery + table will be created to hold resource + consumption records. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + bigquery_destination = proto.Field( + proto.MESSAGE, + number=1, + message=BigQueryDestination, + ) + enable_network_egress_metering = proto.Field( + proto.BOOL, + number=2, + ) + consumption_metering_config = proto.Field( + proto.MESSAGE, + number=3, + message=ConsumptionMeteringConfig, + ) + + +class VerticalPodAutoscaling(proto.Message): + r"""VerticalPodAutoscaling contains global, per-cluster + information required by Vertical Pod Autoscaler to automatically + adjust the resources of pods controlled by it. + + Attributes: + enabled (bool): + Enables vertical pod autoscaling. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class DefaultSnatStatus(proto.Message): + r"""DefaultSnatStatus contains the desired state of whether + default sNAT should be disabled on the cluster. + + Attributes: + disabled (bool): + Disables cluster default sNAT rules. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class ShieldedNodes(proto.Message): + r"""Configuration of Shielded Nodes feature. + Attributes: + enabled (bool): + Whether Shielded Nodes features are enabled + on all nodes in this cluster. 
+ """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/owl-bot-staging/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py new file mode 100644 index 00000000..adbd707b --- /dev/null +++ b/owl-bot-staging/v1/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/container_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py new file mode 100644 index 00000000..308cef05 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py @@ -0,0 +1,207 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class containerCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'complete_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', ), + 'create_cluster': ('cluster', 'project_id', 'zone', 'parent', ), + 'create_node_pool': ('node_pool', 'project_id', 'zone', 'cluster_id', 'parent', ), + 'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'get_json_web_keys': ('parent', ), + 'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'get_server_config': ('project_id', 'zone', 'name', ), + 'list_clusters': ('project_id', 'zone', 'parent', ), + 'list_node_pools': ('project_id', 'zone', 'cluster_id', 'parent', ), + 'list_operations': ('project_id', 'zone', 'parent', ), + 'list_usable_subnetworks': ('parent', 'filter', 'page_size', 'page_token', ), + 'rollback_node_pool_upgrade': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_addons_config': ('addons_config', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_labels': ('resource_labels', 'label_fingerprint', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_legacy_abac': ('enabled', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_locations': ('locations', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_logging_service': ('logging_service', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_maintenance_policy': ('project_id', 'zone', 'cluster_id', 'maintenance_policy', 'name', ), + 'set_master_auth': ('action', 'update', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_monitoring_service': ('monitoring_service', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_network_policy': ('network_policy', 'project_id', 'zone', 'cluster_id', 'name', ), + 'set_node_pool_autoscaling': ('autoscaling', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_node_pool_management': ('management', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_node_pool_size': ('node_count', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ), + 'update_cluster': ('update', 'project_id', 'zone', 'cluster_id', 'name', ), + 'update_master': ('master_version', 'project_id', 'zone', 'cluster_id', 'name', ), + 'update_node_pool': ('node_version', 'image_type', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', 'locations', 'workload_metadata_config', 'upgrade_settings', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API 
or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=containerCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the container client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method.
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py new file mode 100644 index 00000000..eb059ee8 --- /dev/null +++ b/owl-bot-staging/v1/setup.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-container', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google',), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 3.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py b/owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py new file mode 100644 index 00000000..7509850a --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py @@ -0,0 +1,9434 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient +from google.container_v1.services.cluster_manager import ClusterManagerClient +from google.container_v1.services.cluster_manager import pagers +from google.container_v1.services.cluster_manager import transports +from google.container_v1.services.cluster_manager.transports.base import _GOOGLE_AUTH_VERSION +from google.container_v1.types import cluster_service +from google.oauth2 import service_account +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterManagerClient._get_default_mtls_endpoint(None) is None + assert ClusterManagerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ClusterManagerClient, + ClusterManagerAsyncClient, +]) +def test_cluster_manager_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'container.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ClusterManagerGrpcTransport, "grpc"), + (transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_manager_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ClusterManagerClient, + ClusterManagerAsyncClient, +]) +def test_cluster_manager_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'container.googleapis.com:443' + + +def test_cluster_manager_client_get_transport_class(): + transport = ClusterManagerClient.get_transport_class() + available_transports = [ + transports.ClusterManagerGrpcTransport, + ] + assert transport in available_transports + + transport = ClusterManagerClient.get_transport_class("grpc") + assert transport == transports.ClusterManagerGrpcTransport + + 
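
Taken together, the factory and transport tests above pin down the default endpoint and the self-signed JWT wiring. A minimal usage sketch mirroring what these tests assert (the credentials path is a placeholder, exactly as in the tests themselves):

```python
from google.container_v1 import ClusterManagerClient

# Build a client from a service account key file;
# from_service_account_json() is an equivalent alias.
client = ClusterManagerClient.from_service_account_file("dummy/file/path.json")

# The default gRPC transport targets container.googleapis.com:443, and the
# service account credentials are wired with always_use_jwt_access=True,
# i.e. self-signed JWTs rather than exchanged OAuth2 access tokens.
assert client.transport._host == 'container.googleapis.com:443'
```
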
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) +@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) +def test_cluster_manager_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) +@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_cluster_manager_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_manager_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_manager_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_cluster_manager_client_client_options_from_dict(): + with mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ClusterManagerClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_list_clusters(transport: str = 'grpc', request_type=cluster_service.ListClustersRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse( + missing_zones=['missing_zones_value'], + ) + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListClustersResponse) + assert response.missing_zones == ['missing_zones_value'] + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListClustersRequest() + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListClustersRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse( + missing_zones=['missing_zones_value'], + )) + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListClustersResponse) + assert response.missing_zones == ['missing_zones_value'] + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value = cluster_service.ListClustersResponse() + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse()) + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_clusters_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id='project_id_value', + zone='zone_value', + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].parent == 'parent_value' + + +def test_list_clusters_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + cluster_service.ListClustersRequest(), + project_id='project_id_value', + zone='zone_value', + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + project_id='project_id_value', + zone='zone_value', + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + cluster_service.ListClustersRequest(), + project_id='project_id_value', + zone='zone_value', + parent='parent_value', + ) + + +def test_get_cluster(transport: str = 'grpc', request_type=cluster_service.GetClusterRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.Cluster( + name='name_value', + description='description_value', + initial_node_count=1911, + logging_service='logging_service_value', + monitoring_service='monitoring_service_value', + network='network_value', + cluster_ipv4_cidr='cluster_ipv4_cidr_value', + subnetwork='subnetwork_value', + locations=['locations_value'], + enable_kubernetes_alpha=True, + label_fingerprint='label_fingerprint_value', + self_link='self_link_value', + zone='zone_value', + endpoint='endpoint_value', + initial_cluster_version='initial_cluster_version_value', + current_master_version='current_master_version_value', + current_node_version='current_node_version_value', + create_time='create_time_value', + status=cluster_service.Cluster.Status.PROVISIONING, + status_message='status_message_value', + node_ipv4_cidr_size=1955, + services_ipv4_cidr='services_ipv4_cidr_value', + instance_group_urls=['instance_group_urls_value'], + current_node_count=1936, + expire_time='expire_time_value', + location='location_value', + enable_tpu=True, + tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value', + ) + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Cluster) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.initial_node_count == 1911 + assert response.logging_service == 'logging_service_value' + assert response.monitoring_service == 'monitoring_service_value' + assert response.network == 'network_value' + assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' + assert response.subnetwork == 'subnetwork_value' + assert response.locations == ['locations_value'] + assert response.enable_kubernetes_alpha is True + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + assert response.endpoint == 'endpoint_value' + assert response.initial_cluster_version == 'initial_cluster_version_value' + assert response.current_master_version == 'current_master_version_value' + assert response.current_node_version == 'current_node_version_value' + assert response.create_time == 'create_time_value' + assert response.status == cluster_service.Cluster.Status.PROVISIONING + assert response.status_message == 'status_message_value' + assert response.node_ipv4_cidr_size == 1955 + assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' + assert response.instance_group_urls == ['instance_group_urls_value'] + assert response.current_node_count == 1936 + assert response.expire_time == 'expire_time_value' + assert response.location == 'location_value' + assert response.enable_tpu is True + assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
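+    # Patching '__call__' on the stub's type intercepts the unary call itself.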
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        client.get_cluster()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetClusterRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster(
+            name='name_value',
+            description='description_value',
+            initial_node_count=1911,
+            logging_service='logging_service_value',
+            monitoring_service='monitoring_service_value',
+            network='network_value',
+            cluster_ipv4_cidr='cluster_ipv4_cidr_value',
+            subnetwork='subnetwork_value',
+            locations=['locations_value'],
+            enable_kubernetes_alpha=True,
+            label_fingerprint='label_fingerprint_value',
+            self_link='self_link_value',
+            zone='zone_value',
+            endpoint='endpoint_value',
+            initial_cluster_version='initial_cluster_version_value',
+            current_master_version='current_master_version_value',
+            current_node_version='current_node_version_value',
+            create_time='create_time_value',
+            status=cluster_service.Cluster.Status.PROVISIONING,
+            status_message='status_message_value',
+            node_ipv4_cidr_size=1955,
+            services_ipv4_cidr='services_ipv4_cidr_value',
+            instance_group_urls=['instance_group_urls_value'],
+            current_node_count=1936,
+            expire_time='expire_time_value',
+            location='location_value',
+            enable_tpu=True,
+            tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value',
+        ))
+        response = await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetClusterRequest()
+
+    # Establish that the response is the type that we expect.
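+    # Awaiting the FakeUnaryUnaryCall above unwraps it to the designated Cluster proto.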
+ assert isinstance(response, cluster_service.Cluster) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.initial_node_count == 1911 + assert response.logging_service == 'logging_service_value' + assert response.monitoring_service == 'monitoring_service_value' + assert response.network == 'network_value' + assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' + assert response.subnetwork == 'subnetwork_value' + assert response.locations == ['locations_value'] + assert response.enable_kubernetes_alpha is True + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + assert response.endpoint == 'endpoint_value' + assert response.initial_cluster_version == 'initial_cluster_version_value' + assert response.current_master_version == 'current_master_version_value' + assert response.current_node_version == 'current_node_version_value' + assert response.create_time == 'create_time_value' + assert response.status == cluster_service.Cluster.Status.PROVISIONING + assert response.status_message == 'status_message_value' + assert response.node_ipv4_cidr_size == 1955 + assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' + assert response.instance_group_urls == ['instance_group_urls_value'] + assert response.current_node_count == 1936 + assert response.expire_time == 'expire_time_value' + assert response.location == 'location_value' + assert response.enable_tpu is True + assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' + + +@pytest.mark.asyncio +async def test_get_cluster_async_from_dict(): + await test_get_cluster_async(request_type=dict) + + +def test_get_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + call.return_value = cluster_service.Cluster() + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster()) + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
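+    # Routing information travels in the 'x-goog-request-params' metadata entry.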
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_cluster_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Cluster()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].name == 'name_value'
+
+
+def test_get_cluster_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_cluster(
+            cluster_service.GetClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_cluster(
+            cluster_service.GetClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            name='name_value',
+        )
+
+
+def test_create_cluster(transport: str = 'grpc', request_type=cluster_service.CreateClusterRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
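+    # request_type() yields either an empty proto or an empty dict; the client accepts both.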
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CreateClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_create_cluster_from_dict():
+    test_create_cluster(request_type=dict)
+
+
+def test_create_cluster_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        client.create_cluster()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CreateClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateClusterRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.create_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
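+        # Only that at least one call was recorded is asserted for the async mock.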
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_cluster_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        client.create_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster=cluster_service.Cluster(name='name_value'),
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster == cluster_service.Cluster(name='name_value')
+        assert args[0].parent == 'parent_value'
+
+
+def test_create_cluster_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_cluster(
+            cluster_service.CreateClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster=cluster_service.Cluster(name='name_value'),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster=cluster_service.Cluster(name='name_value'),
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster == cluster_service.Cluster(name='name_value')
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_cluster(
+            cluster_service.CreateClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster=cluster_service.Cluster(name='name_value'),
+            parent='parent_value',
+        )
+
+
+def test_update_cluster(transport: str = 'grpc', request_type=cluster_service.UpdateClusterRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
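+        # Cluster mutations surface as a long-running Operation rather than the updated resource.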
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_update_cluster_from_dict():
+    test_update_cluster(request_type=dict)
+
+
+def test_update_cluster_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_cluster),
+            '__call__') as call:
+        client.update_cluster()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateClusterRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateClusterRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_update_cluster_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value')
+        assert args[0].name == 'name_value'
+
+
+def test_update_cluster_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_cluster(
+            cluster_service.UpdateClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value')
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_cluster(
+            cluster_service.UpdateClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
+            name='name_value',
+        )
+
+
+def test_update_node_pool(transport: str = 'grpc', request_type=cluster_service.UpdateNodePoolRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.update_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_update_node_pool_from_dict():
+    test_update_node_pool(request_type=dict)
+
+
+def test_update_node_pool_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_node_pool),
+            '__call__') as call:
+        client.update_node_pool()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateNodePoolRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateNodePoolRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.update_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_update_node_pool_async_from_dict(): + await test_update_node_pool_async(request_type=dict) + + +def test_update_node_pool_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_node_pool), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_node_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_node_pool_autoscaling(transport: str = 'grpc', request_type=cluster_service.SetNodePoolAutoscalingRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_autoscaling), + '__call__') as call: + # Designate an appropriate return value for the call. 
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.set_node_pool_autoscaling(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_set_node_pool_autoscaling_from_dict():
+    test_set_node_pool_autoscaling(request_type=dict)
+
+
+def test_set_node_pool_autoscaling_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_autoscaling),
+            '__call__') as call:
+        client.set_node_pool_autoscaling()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_autoscaling_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolAutoscalingRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_autoscaling),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_node_pool_autoscaling(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_async_from_dict(): + await test_set_node_pool_autoscaling_async(request_type=dict) + + +def test_set_node_pool_autoscaling_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolAutoscalingRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_autoscaling), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolAutoscalingRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_autoscaling), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_logging_service(transport: str = 'grpc', request_type=cluster_service.SetLoggingServiceRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.set_logging_service),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.set_logging_service(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLoggingServiceRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_set_logging_service_from_dict():
+    test_set_logging_service(request_type=dict)
+
+
+def test_set_logging_service_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_logging_service),
+            '__call__') as call:
+        client.set_logging_service()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLoggingServiceRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_logging_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLoggingServiceRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_logging_service),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_logging_service(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLoggingServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_logging_service_async_from_dict(): + await test_set_logging_service_async(request_type=dict) + + +def test_set_logging_service_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_logging_service), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_logging_service_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_logging_service), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_logging_service_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_logging_service), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        client.set_logging_service(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            logging_service='logging_service_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].logging_service == 'logging_service_value'
+        assert args[0].name == 'name_value'
+
+
+def test_set_logging_service_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_logging_service(
+            cluster_service.SetLoggingServiceRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            logging_service='logging_service_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_logging_service_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_logging_service),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_logging_service(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            logging_service='logging_service_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].logging_service == 'logging_service_value'
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_set_logging_service_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_logging_service(
+            cluster_service.SetLoggingServiceRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            logging_service='logging_service_value',
+            name='name_value',
+        )
+
+
+def test_set_monitoring_service(transport: str = 'grpc', request_type=cluster_service.SetMonitoringServiceRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_monitoring_service),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMonitoringServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_monitoring_service_from_dict(): + test_set_monitoring_service(request_type=dict) + + +def test_set_monitoring_service_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + client.set_monitoring_service() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMonitoringServiceRequest() + + +@pytest.mark.asyncio +async def test_set_monitoring_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMonitoringServiceRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMonitoringServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_monitoring_service_async_from_dict(): + await test_set_monitoring_service_async(request_type=dict) + + +def test_set_monitoring_service_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_monitoring_service_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_monitoring_service_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_monitoring_service( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + monitoring_service='monitoring_service_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].monitoring_service == 'monitoring_service_value' + assert args[0].name == 'name_value' + + +def test_set_monitoring_service_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_monitoring_service( + cluster_service.SetMonitoringServiceRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + monitoring_service='monitoring_service_value', + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_set_monitoring_service_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_monitoring_service( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + monitoring_service='monitoring_service_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].monitoring_service == 'monitoring_service_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_set_monitoring_service_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_monitoring_service( + cluster_service.SetMonitoringServiceRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + monitoring_service='monitoring_service_value', + name='name_value', + ) + + +def test_set_addons_config(transport: str = 'grpc', request_type=cluster_service.SetAddonsConfigRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetAddonsConfigRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_addons_config_from_dict(): + test_set_addons_config(request_type=dict) + + +def test_set_addons_config_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + client.set_addons_config() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetAddonsConfigRequest() + + +@pytest.mark.asyncio +async def test_set_addons_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetAddonsConfigRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called.
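+ # mock.patch.object records each stub invocation in call.mock_calls; + # args[0] of the first recorded call is the request proto that was sent.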
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetAddonsConfigRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_addons_config_async_from_dict(): + await test_set_addons_config_async(request_type=dict) + + +def test_set_addons_config_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_addons_config_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_addons_config_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
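+ # Message-typed flattened fields (here an AddonsConfig) are merged into + # the request the same way as scalar fields and compare by value below.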
+ client.set_addons_config( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)) + assert args[0].name == 'name_value' + + +def test_set_addons_config_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_addons_config( + cluster_service.SetAddonsConfigRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_set_addons_config_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_addons_config( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)) + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_set_addons_config_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error.
+ with pytest.raises(ValueError): + await client.set_addons_config( + cluster_service.SetAddonsConfigRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)), + name='name_value', + ) + + +def test_set_locations(transport: str = 'grpc', request_type=cluster_service.SetLocationsRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLocationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_locations_from_dict(): + test_set_locations(request_type=dict) + + +def test_set_locations_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + client.set_locations() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLocationsRequest() + + +@pytest.mark.asyncio +async def test_set_locations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLocationsRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLocationsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_locations_async_from_dict(): + await test_set_locations_async(request_type=dict) + + +def test_set_locations_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLocationsRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_locations_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLocationsRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
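+ # kw['metadata'] is the metadata sequence handed to the stub; the client + # is expected to append an ('x-goog-request-params', 'name=name/value') + # entry so the backend can route the request by resource name.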
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_locations_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_locations( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + locations=['locations_value'], + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].locations == ['locations_value'] + assert args[0].name == 'name_value' + + +def test_set_locations_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_locations( + cluster_service.SetLocationsRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + locations=['locations_value'], + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_set_locations_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_locations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_locations( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + locations=['locations_value'], + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].locations == ['locations_value'] + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_set_locations_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error.
+ with pytest.raises(ValueError): + await client.set_locations( + cluster_service.SetLocationsRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + locations=['locations_value'], + name='name_value', + ) + + +def test_update_master(transport: str = 'grpc', request_type=cluster_service.UpdateMasterRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.UpdateMasterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_update_master_from_dict(): + test_update_master(request_type=dict) + + +def test_update_master_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + client.update_master() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.UpdateMasterRequest() + + +@pytest.mark.asyncio +async def test_update_master_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateMasterRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + # Designate an appropriate return value for the call. 
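+ # FakeUnaryUnaryCall wraps the response so the patched stub returns an + # awaitable call object, mirroring the grpc.aio surface.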
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.UpdateMasterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_update_master_async_from_dict(): + await test_update_master_async(request_type=dict) + + +def test_update_master_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateMasterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_master_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateMasterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.update_master(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_update_master_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_master( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + master_version='master_version_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].master_version == 'master_version_value' + assert args[0].name == 'name_value' + + +def test_update_master_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_master( + cluster_service.UpdateMasterRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + master_version='master_version_value', + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_update_master_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_master), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.update_master( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + master_version='master_version_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].master_version == 'master_version_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_update_master_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error.
+ with pytest.raises(ValueError): + await client.update_master( + cluster_service.UpdateMasterRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + master_version='master_version_value', + name='name_value', + ) + + +def test_set_master_auth(transport: str = 'grpc', request_type=cluster_service.SetMasterAuthRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_master_auth), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMasterAuthRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_master_auth_from_dict(): + test_set_master_auth(request_type=dict) + + +def test_set_master_auth_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_master_auth), + '__call__') as call: + client.set_master_auth() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMasterAuthRequest() + + +@pytest.mark.asyncio +async def test_set_master_auth_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMasterAuthRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
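+ # Patching type(...).__call__ replaces the stub's callable itself, so the + # request below never reaches a real gRPC channel.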
+ with mock.patch.object( + type(client.transport.set_master_auth), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMasterAuthRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_master_auth_async_from_dict(): + await test_set_master_auth_async(request_type=dict) + + +def test_set_master_auth_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMasterAuthRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_master_auth), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_master_auth_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMasterAuthRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_master_auth), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_master_auth(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_cluster(transport: str = 'grpc', request_type=cluster_service.DeleteClusterRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_delete_cluster_from_dict(): + test_delete_cluster(request_type=dict) + + +def test_delete_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.DeleteClusterRequest() + + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteClusterRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
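+ # Operation reports start_time/end_time as plain strings in this API, + # hence the string sentinels below rather than Timestamp messages.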
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.DeleteClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_delete_cluster_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_cluster( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].name == 'name_value' + + +def test_delete_cluster_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + cluster_service.DeleteClusterRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.delete_cluster( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_delete_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_cluster( + cluster_service.DeleteClusterRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + +def test_list_operations(transport: str = 'grpc', request_type=cluster_service.ListOperationsRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request.
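+ # request_type is either the request class or dict (see the _from_dict + # variant below); the client accepts both and coerces a dict into the + # proto message.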
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListOperationsResponse(
+            missing_zones=['missing_zones_value'],
+        )
+        response = client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListOperationsResponse)
+    assert response.missing_zones == ['missing_zones_value']
+
+
+def test_list_operations_from_dict():
+    test_list_operations(request_type=dict)
+
+
+def test_list_operations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        client.list_operations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListOperationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListOperationsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse(
+            missing_zones=['missing_zones_value'],
+        ))
+        response = await client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListOperationsResponse)
+    assert response.missing_zones == ['missing_zones_value']
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async_from_dict():
+    await test_list_operations_async(request_type=dict)
+
+
+def test_list_operations_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListOperationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        call.return_value = cluster_service.ListOperationsResponse()
+        client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
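+        # (Each entry in call.mock_calls is a (name, args, kwargs) triple;
+        # args[0] is the request proto the client actually sent to the stub.)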
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListOperationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
+        await client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_operations_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListOperationsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_operations(
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+
+
+def test_list_operations_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_operations(
+            cluster_service.ListOperationsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_operations_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_operations(
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
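+        # (The flattened keyword arguments are folded into a single request
+        # proto before the transport is invoked, so args[0] carries them all.)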
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + + +@pytest.mark.asyncio +async def test_list_operations_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_operations( + cluster_service.ListOperationsRequest(), + project_id='project_id_value', + zone='zone_value', + ) + + +def test_get_operation(transport: str = 'grpc', request_type=cluster_service.GetOperationRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.get_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetOperationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_get_operation_from_dict(): + test_get_operation(request_type=dict) + + +def test_get_operation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
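+    # (Patching __call__ on the stub's type intercepts the RPC at the
+    # transport layer, so the request never travels over a real channel.)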
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        client.get_operation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetOperationRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetOperationRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetOperationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async_from_dict():
+    await test_get_operation_async(request_type=dict)
+
+
+def test_get_operation_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetOperationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetOperationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_operation_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].operation_id == 'operation_id_value'
+        assert args[0].name == 'name_value'
+
+
+def test_get_operation_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_operation(
+            cluster_service.GetOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_operation_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].operation_id == 'operation_id_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_get_operation_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_operation( + cluster_service.GetOperationRequest(), + project_id='project_id_value', + zone='zone_value', + operation_id='operation_id_value', + name='name_value', + ) + + +def test_cancel_operation(transport: str = 'grpc', request_type=cluster_service.CancelOperationRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CancelOperationRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_cancel_operation_from_dict(): + test_cancel_operation(request_type=dict) + + +def test_cancel_operation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_operation), + '__call__') as call: + client.cancel_operation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CancelOperationRequest() + + +@pytest.mark.asyncio +async def test_cancel_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CancelOperationRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CancelOperationRequest() + + # Establish that the response is the type that we expect. 
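+    # (CancelOperation maps to a google.protobuf.Empty response, which the
+    # generated client surfaces as None rather than as an empty message.)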
+ assert response is None + + +@pytest.mark.asyncio +async def test_cancel_operation_async_from_dict(): + await test_cancel_operation_async(request_type=dict) + + +def test_cancel_operation_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CancelOperationRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_operation), + '__call__') as call: + call.return_value = None + client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_cancel_operation_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CancelOperationRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_operation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.cancel_operation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_cancel_operation_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.cancel_operation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.cancel_operation( + project_id='project_id_value', + zone='zone_value', + operation_id='operation_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].operation_id == 'operation_id_value' + assert args[0].name == 'name_value' + + +def test_cancel_operation_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
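+    # (Mixing the two calling conventions is rejected outright rather than
+    # letting flattened values silently overwrite request fields.)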
+    with pytest.raises(ValueError):
+        client.cancel_operation(
+            cluster_service.CancelOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].operation_id == 'operation_id_value'
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_operation(
+            cluster_service.CancelOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+            name='name_value',
+        )
+
+
+def test_get_server_config(transport: str = 'grpc', request_type=cluster_service.GetServerConfigRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ServerConfig(
+            default_cluster_version='default_cluster_version_value',
+            valid_node_versions=['valid_node_versions_value'],
+            default_image_type='default_image_type_value',
+            valid_image_types=['valid_image_types_value'],
+            valid_master_versions=['valid_master_versions_value'],
+        )
+        response = client.get_server_config(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetServerConfigRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ServerConfig)
+    assert response.default_cluster_version == 'default_cluster_version_value'
+    assert response.valid_node_versions == ['valid_node_versions_value']
+    assert response.default_image_type == 'default_image_type_value'
+    assert response.valid_image_types == ['valid_image_types_value']
+    assert response.valid_master_versions == ['valid_master_versions_value']
+
+
+def test_get_server_config_from_dict():
+    test_get_server_config(request_type=dict)
+
+
+def test_get_server_config_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        client.get_server_config()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetServerConfigRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetServerConfigRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig(
+            default_cluster_version='default_cluster_version_value',
+            valid_node_versions=['valid_node_versions_value'],
+            default_image_type='default_image_type_value',
+            valid_image_types=['valid_image_types_value'],
+            valid_master_versions=['valid_master_versions_value'],
+        ))
+        response = await client.get_server_config(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetServerConfigRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ServerConfig)
+    assert response.default_cluster_version == 'default_cluster_version_value'
+    assert response.valid_node_versions == ['valid_node_versions_value']
+    assert response.default_image_type == 'default_image_type_value'
+    assert response.valid_image_types == ['valid_image_types_value']
+    assert response.valid_master_versions == ['valid_master_versions_value']
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_async_from_dict():
+    await test_get_server_config_async(request_type=dict)
+
+
+def test_get_server_config_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetServerConfigRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
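+    # (The client copies request.name into the x-goog-request-params metadata
+    # so the backend can route the call; the mock lets us inspect it.)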
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        call.return_value = cluster_service.ServerConfig()
+        client.get_server_config(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetServerConfigRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig())
+        await client.get_server_config(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_server_config_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ServerConfig()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_server_config(
+            project_id='project_id_value',
+            zone='zone_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].name == 'name_value'
+
+
+def test_get_server_config_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_server_config(
+            cluster_service.GetServerConfigRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_server_config(
+            project_id='project_id_value',
+            zone='zone_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_server_config(
+            cluster_service.GetServerConfigRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            name='name_value',
+        )
+
+
+def test_get_json_web_keys(transport: str = 'grpc', request_type=cluster_service.GetJSONWebKeysRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.GetJSONWebKeysResponse()
+        response = client.get_json_web_keys(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetJSONWebKeysRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
+
+
+def test_get_json_web_keys_from_dict():
+    test_get_json_web_keys(request_type=dict)
+
+
+def test_get_json_web_keys_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        client.get_json_web_keys()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetJSONWebKeysRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_json_web_keys_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetJSONWebKeysRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse())
+        response = await client.get_json_web_keys(request)
+
+        # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetJSONWebKeysRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.GetJSONWebKeysResponse) + + +@pytest.mark.asyncio +async def test_get_json_web_keys_async_from_dict(): + await test_get_json_web_keys_async(request_type=dict) + + +def test_get_json_web_keys_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetJSONWebKeysRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_json_web_keys), + '__call__') as call: + call.return_value = cluster_service.GetJSONWebKeysResponse() + client.get_json_web_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_json_web_keys_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetJSONWebKeysRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_json_web_keys), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse()) + await client.get_json_web_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_node_pools(transport: str = 'grpc', request_type=cluster_service.ListNodePoolsRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_node_pools), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListNodePoolsResponse( + ) + response = client.list_node_pools(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListNodePoolsRequest() + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, cluster_service.ListNodePoolsResponse)
+
+
+def test_list_node_pools_from_dict():
+    test_list_node_pools(request_type=dict)
+
+
+def test_list_node_pools_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        client.list_node_pools()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListNodePoolsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListNodePoolsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
+        response = await client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListNodePoolsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListNodePoolsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_async_from_dict():
+    await test_list_node_pools_async(request_type=dict)
+
+
+def test_list_node_pools_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListNodePoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        call.return_value = cluster_service.ListNodePoolsResponse()
+        client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListNodePoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
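+    # (grpc_helpers_async.FakeUnaryUnaryCall wraps the canned response in an
+    # awaitable, standing in for the real async unary-unary call object.)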
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
+        await client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_node_pools_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListNodePoolsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_node_pools(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_node_pools_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_node_pools(
+            cluster_service.ListNodePoolsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_node_pools(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.list_node_pools( + cluster_service.ListNodePoolsRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + parent='parent_value', + ) + + +def test_get_node_pool(transport: str = 'grpc', request_type=cluster_service.GetNodePoolRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool( + name='name_value', + initial_node_count=1911, + locations=['locations_value'], + self_link='self_link_value', + version='version_value', + instance_group_urls=['instance_group_urls_value'], + status=cluster_service.NodePool.Status.PROVISIONING, + status_message='status_message_value', + pod_ipv4_cidr_size=1856, + ) + response = client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetNodePoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.NodePool) + assert response.name == 'name_value' + assert response.initial_node_count == 1911 + assert response.locations == ['locations_value'] + assert response.self_link == 'self_link_value' + assert response.version == 'version_value' + assert response.instance_group_urls == ['instance_group_urls_value'] + assert response.status == cluster_service.NodePool.Status.PROVISIONING + assert response.status_message == 'status_message_value' + assert response.pod_ipv4_cidr_size == 1856 + + +def test_get_node_pool_from_dict(): + test_get_node_pool(request_type=dict) + + +def test_get_node_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_node_pool), + '__call__') as call: + client.get_node_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetNodePoolRequest() + + +@pytest.mark.asyncio +async def test_get_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetNodePoolRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. 
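+        # (The field values below are arbitrary sentinels; the assertions that
+        # follow only check that they round-trip through the mocked call.)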
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool(
+            name='name_value',
+            initial_node_count=1911,
+            locations=['locations_value'],
+            self_link='self_link_value',
+            version='version_value',
+            instance_group_urls=['instance_group_urls_value'],
+            status=cluster_service.NodePool.Status.PROVISIONING,
+            status_message='status_message_value',
+            pod_ipv4_cidr_size=1856,
+        ))
+        response = await client.get_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.NodePool)
+    assert response.name == 'name_value'
+    assert response.initial_node_count == 1911
+    assert response.locations == ['locations_value']
+    assert response.self_link == 'self_link_value'
+    assert response.version == 'version_value'
+    assert response.instance_group_urls == ['instance_group_urls_value']
+    assert response.status == cluster_service.NodePool.Status.PROVISIONING
+    assert response.status_message == 'status_message_value'
+    assert response.pod_ipv4_cidr_size == 1856
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_async_from_dict():
+    await test_get_node_pool_async(request_type=dict)
+
+
+def test_get_node_pool_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        call.return_value = cluster_service.NodePool()
+        client.get_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool())
+        await client.get_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_node_pool_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.NodePool()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+        assert args[0].name == 'name_value'
+
+
+def test_get_node_pool_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_node_pool(
+            cluster_service.GetNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_node_pool(
+            cluster_service.GetNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+
+def test_create_node_pool(transport: str = 'grpc', request_type=cluster_service.CreateNodePoolRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
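+    # (All proto3 fields have defaults, so an argument-less request_type()
+    # is already a complete, valid request for the mocked API.)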
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateNodePoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_create_node_pool_from_dict(): + test_create_node_pool(request_type=dict) + + +def test_create_node_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_pool), + '__call__') as call: + client.create_node_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateNodePoolRequest() + + +@pytest.mark.asyncio +async def test_create_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateNodePoolRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. 
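+        # grpc_helpers_async.FakeUnaryUnaryCall (from google.api_core)
+        # wraps a plain response message in an awaitable object, standing
+        # in for the grpc.aio call the real transport would return, so
+        # that awaiting the mocked RPC yields the Operation below.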
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.create_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CreateNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_async_from_dict():
+    await test_create_node_pool_async(request_type=dict)
+
+
+def test_create_node_pool_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CreateNodePoolRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.create_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CreateNodePoolRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.create_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
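+    # The routing header travels in the call metadata as a single
+    # ('x-goog-request-params', 'parent=parent/value') tuple, which is
+    # what lets the backend route a gRPC request the way URL path
+    # parameters would route an HTTP one.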
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_node_pool_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool=cluster_service.NodePool(name='name_value'),
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool == cluster_service.NodePool(name='name_value')
+        assert args[0].parent == 'parent_value'
+
+
+def test_create_node_pool_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_node_pool(
+            cluster_service.CreateNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool=cluster_service.NodePool(name='name_value'),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool=cluster_service.NodePool(name='name_value'),
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool == cluster_service.NodePool(name='name_value')
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
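+    # Passing both is ambiguous (it is unclear which value should win),
+    # so the client refuses the call with a ValueError instead of
+    # silently merging or overwriting fields.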
+ with pytest.raises(ValueError): + await client.create_node_pool( + cluster_service.CreateNodePoolRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool=cluster_service.NodePool(name='name_value'), + parent='parent_value', + ) + + +def test_delete_node_pool(transport: str = 'grpc', request_type=cluster_service.DeleteNodePoolRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.DeleteNodePoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_delete_node_pool_from_dict(): + test_delete_node_pool(request_type=dict) + + +def test_delete_node_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_node_pool), + '__call__') as call: + client.delete_node_pool() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.DeleteNodePoolRequest() + + +@pytest.mark.asyncio +async def test_delete_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteNodePoolRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.delete_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.DeleteNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_async_from_dict():
+    await test_delete_node_pool_async(request_type=dict)
+
+
+def test_delete_node_pool_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.DeleteNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.delete_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.DeleteNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.delete_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_node_pool_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+        assert args[0].name == 'name_value'
+
+
+def test_delete_node_pool_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_node_pool(
+            cluster_service.DeleteNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.delete_node_pool( + cluster_service.DeleteNodePoolRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + name='name_value', + ) + + +def test_rollback_node_pool_upgrade(transport: str = 'grpc', request_type=cluster_service.RollbackNodePoolUpgradeRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.rollback_node_pool_upgrade), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_rollback_node_pool_upgrade_from_dict(): + test_rollback_node_pool_upgrade(request_type=dict) + + +def test_rollback_node_pool_upgrade_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.rollback_node_pool_upgrade), + '__call__') as call: + client.rollback_node_pool_upgrade() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_async(transport: str = 'grpc_asyncio', request_type=cluster_service.RollbackNodePoolUpgradeRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.rollback_node_pool_upgrade(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_async_from_dict():
+    await test_rollback_node_pool_upgrade_async(request_type=dict)
+
+
+def test_rollback_node_pool_upgrade_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.RollbackNodePoolUpgradeRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.rollback_node_pool_upgrade(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.RollbackNodePoolUpgradeRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.rollback_node_pool_upgrade(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_rollback_node_pool_upgrade_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.rollback_node_pool_upgrade(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+        assert args[0].name == 'name_value'
+
+
+def test_rollback_node_pool_upgrade_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.rollback_node_pool_upgrade(
+            cluster_service.RollbackNodePoolUpgradeRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.rollback_node_pool_upgrade(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].node_pool_id == 'node_pool_id_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.rollback_node_pool_upgrade( + cluster_service.RollbackNodePoolUpgradeRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + name='name_value', + ) + + +def test_set_node_pool_management(transport: str = 'grpc', request_type=cluster_service.SetNodePoolManagementRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_management), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolManagementRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_node_pool_management_from_dict(): + test_set_node_pool_management(request_type=dict) + + +def test_set_node_pool_management_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
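+    # With no request object and no flattened fields, the client should
+    # fall back to constructing a default SetNodePoolManagementRequest()
+    # on its own; the assertion below verifies exactly that.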
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        client.set_node_pool_management()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolManagementRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_management_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolManagementRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_node_pool_management(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolManagementRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_management_async_from_dict():
+    await test_set_node_pool_management_async(request_type=dict)
+
+
+def test_set_node_pool_management_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetNodePoolManagementRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_node_pool_management(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_node_pool_management_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolManagementRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_management), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_labels(transport: str = 'grpc', request_type=cluster_service.SetLabelsRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_labels), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLabelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_labels_from_dict(): + test_set_labels(request_type=dict) + + +def test_set_labels_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
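+    # Note: client.transport.set_labels is a lazily created gRPC stub
+    # wrapper cached on the transport, so these tests patch '__call__' on
+    # its type; the resulting mock then records (in mock_calls) every
+    # invocation the client makes through the stub.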
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        client.set_labels()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLabelsRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_labels_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLabelsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_labels(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLabelsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_labels_async_from_dict():
+    await test_set_labels_async(request_type=dict)
+
+
+def test_set_labels_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLabelsRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_labels(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_labels_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLabelsRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_labels(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_set_legacy_abac(transport: str = 'grpc', request_type=cluster_service.SetLegacyAbacRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLegacyAbacRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_set_legacy_abac_from_dict():
+    test_set_legacy_abac(request_type=dict)
+
+
+def test_set_legacy_abac_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        client.set_legacy_abac()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLegacyAbacRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLegacyAbacRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLegacyAbacRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_async_from_dict():
+    await test_set_legacy_abac_async(request_type=dict)
+
+
+def test_set_legacy_abac_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLegacyAbacRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLegacyAbacRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_set_legacy_abac_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_legacy_abac(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].enabled is True
+        assert args[0].name == 'name_value'
+
+
+def test_set_legacy_abac_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_legacy_abac(
+            cluster_service.SetLegacyAbacRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+            name='name_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_legacy_abac(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].enabled is True
+        assert args[0].name == 'name_value'
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_legacy_abac(
+            cluster_service.SetLegacyAbacRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+            name='name_value',
+        )
+
+
+def test_start_ip_rotation(transport: str = 'grpc', request_type=cluster_service.StartIPRotationRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.StartIPRotationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_start_ip_rotation_from_dict():
+    test_start_ip_rotation(request_type=dict)
+
+
+def test_start_ip_rotation_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        client.start_ip_rotation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.StartIPRotationRequest()
+
+
+@pytest.mark.asyncio
+async def test_start_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.StartIPRotationRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.StartIPRotationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_start_ip_rotation_async_from_dict():
+    await test_start_ip_rotation_async(request_type=dict)
+
+
+def test_start_ip_rotation_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.StartIPRotationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_start_ip_rotation_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.StartIPRotationRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_ip_rotation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.start_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_start_ip_rotation_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.start_ip_rotation( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].name == 'name_value' + + +def test_start_ip_rotation_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.start_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.start_ip_rotation( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + +def test_complete_ip_rotation(transport: str = 'grpc', request_type=cluster_service.CompleteIPRotationRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CompleteIPRotationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_complete_ip_rotation_from_dict(): + test_complete_ip_rotation(request_type=dict) + + +def test_complete_ip_rotation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
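+ # With no request and no flattened fields, the client is expected to + # synthesize a default CompleteIPRotationRequest() before invoking the stub.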
+ with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + client.complete_ip_rotation() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CompleteIPRotationRequest() + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CompleteIPRotationRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CompleteIPRotationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_async_from_dict(): + await test_complete_ip_rotation_async(request_type=dict) + + +def test_complete_ip_rotation_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CompleteIPRotationRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CompleteIPRotationRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_complete_ip_rotation_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.complete_ip_rotation( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].name == 'name_value' + + +def test_complete_ip_rotation_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.complete_ip_rotation( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + name='name_value', + ) + + +def test_set_node_pool_size(transport: str = 'grpc', request_type=cluster_service.SetNodePoolSizeRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolSizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_node_pool_size_from_dict(): + test_set_node_pool_size(request_type=dict) + + +def test_set_node_pool_size_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + client.set_node_pool_size() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolSizeRequest() + + +@pytest.mark.asyncio +async def test_set_node_pool_size_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolSizeRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolSizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_node_pool_size_async_from_dict(): + await test_set_node_pool_size_async(request_type=dict) + + +def test_set_node_pool_size_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_node_pool_size_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_network_policy(transport: str = 'grpc', request_type=cluster_service.SetNetworkPolicyRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNetworkPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_network_policy_from_dict(): + test_set_network_policy(request_type=dict) + + +def test_set_network_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + client.set_network_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNetworkPolicyRequest() + + +@pytest.mark.asyncio +async def test_set_network_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNetworkPolicyRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNetworkPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_network_policy_async_from_dict(): + await test_set_network_policy_async(request_type=dict) + + +def test_set_network_policy_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNetworkPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_network_policy_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNetworkPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_network_policy_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_network_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) + assert args[0].name == 'name_value' + + +def test_set_network_policy_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method.
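+ # The awaited call below resolves the FakeUnaryUnaryCall wrapper, which + # stands in for a real grpc.aio unary-unary call.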
+ response = await client.set_network_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + name='name_value', + ) + + +def test_set_maintenance_policy(transport: str = 'grpc', request_type=cluster_service.SetMaintenancePolicyRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMaintenancePolicyRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_maintenance_policy_from_dict(): + test_set_maintenance_policy(request_type=dict) + + +def test_set_maintenance_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + client.set_maintenance_policy() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMaintenancePolicyRequest() + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMaintenancePolicyRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMaintenancePolicyRequest() + + # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_async_from_dict(): + await test_set_maintenance_policy_async(request_type=dict) + + +def test_set_maintenance_policy_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMaintenancePolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMaintenancePolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_maintenance_policy_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_maintenance_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))) + assert args[0].name == 'name_value' + + +def test_set_maintenance_policy_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), + name='name_value', + ) + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.set_maintenance_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))) + assert args[0].name == 'name_value' + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error.
+ with pytest.raises(ValueError): + await client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), + name='name_value', + ) + + +def test_list_usable_subnetworks(transport: str = 'grpc', request_type=cluster_service.ListUsableSubnetworksRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListUsableSubnetworksResponse( + next_page_token='next_page_token_value', + ) + response = client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListUsableSubnetworksRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUsableSubnetworksPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_usable_subnetworks_from_dict(): + test_list_usable_subnetworks(request_type=dict) + + +def test_list_usable_subnetworks_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + client.list_usable_subnetworks() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListUsableSubnetworksRequest() + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListUsableSubnetworksRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse( + next_page_token='next_page_token_value', + )) + response = await client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListUsableSubnetworksRequest() + + # Establish that the response is the type that we expect.
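+ # Rather than the raw response proto, the async client returns an + # AsyncPager that lazily fetches further pages during iteration.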
+ assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_from_dict(): + await test_list_usable_subnetworks_async(request_type=dict) + + +def test_list_usable_subnetworks_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListUsableSubnetworksRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + call.return_value = cluster_service.ListUsableSubnetworksResponse() + client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListUsableSubnetworksRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse()) + await client.list_usable_subnetworks(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_usable_subnetworks_pager(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + # Set the response to a series of pages.
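+ # A sequence assigned to side_effect makes the mock return one response + # per call; the trailing RuntimeError fails the test if the pager ever + # requests more pages than were staged.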
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token='abc', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], + next_page_token='def', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + ], + next_page_token='ghi', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + + metadata = ( + gapic_v1.routing_header.to_grpc_metadata(( + ('parent', ''), + )), + ) + pager = client.list_usable_subnetworks(request={}) + + assert pager._metadata == metadata + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, cluster_service.UsableSubnetwork) + for i in results) + +def test_list_usable_subnetworks_pages(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__') as call: + # Set the response to a series of pages. + call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token='abc', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], + next_page_token='def', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + ], + next_page_token='ghi', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + pages = list(client.list_usable_subnetworks(request={}).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_pager(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages.
+ call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token='abc', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], + next_page_token='def', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + ], + next_page_token='ghi', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + async_pager = await client.list_usable_subnetworks(request={},) + assert async_pager.next_page_token == 'abc' + responses = [] + async for response in async_pager: + responses.append(response) + + assert len(responses) == 6 + assert all(isinstance(i, cluster_service.UsableSubnetwork) + for i in responses) + +@pytest.mark.asyncio +async def test_list_usable_subnetworks_async_pages(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_usable_subnetworks), + '__call__', new_callable=mock.AsyncMock) as call: + # Set the response to a series of pages. + call.side_effect = ( + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + next_page_token='abc', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[], + next_page_token='def', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + ], + next_page_token='ghi', + ), + cluster_service.ListUsableSubnetworksResponse( + subnetworks=[ + cluster_service.UsableSubnetwork(), + cluster_service.UsableSubnetwork(), + ], + ), + RuntimeError, + ) + pages = [] + async for page_ in (await client.list_usable_subnetworks(request={})).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance.
+ transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ClusterManagerClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterManagerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ClusterManagerGrpcTransport, + ) + +def test_cluster_manager_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ClusterManagerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_cluster_manager_base_transport(): + # Instantiate the base transport. + with mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ClusterManagerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
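+ # The base transport defines each RPC only as a stub that raises, so any + # concrete transport must override every method listed here.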
+ methods = ( + 'list_clusters', + 'get_cluster', + 'create_cluster', + 'update_cluster', + 'update_node_pool', + 'set_node_pool_autoscaling', + 'set_logging_service', + 'set_monitoring_service', + 'set_addons_config', + 'set_locations', + 'update_master', + 'set_master_auth', + 'delete_cluster', + 'list_operations', + 'get_operation', + 'cancel_operation', + 'get_server_config', + 'get_json_web_keys', + 'list_node_pools', + 'get_node_pool', + 'create_node_pool', + 'delete_node_pool', + 'rollback_node_pool_upgrade', + 'set_node_pool_management', + 'set_labels', + 'set_legacy_abac', + 'start_ip_rotation', + 'complete_ip_rotation', + 'set_node_pool_size', + 'set_network_policy', + 'set_maintenance_policy', + 'list_usable_subnetworks', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +@requires_google_auth_gte_1_25_0 +def test_cluster_manager_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_manager_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_cluster_manager_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_cluster_manager_auth_adc(): + # If no credentials are provided, we should use ADC credentials.
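+ # google.auth.default is patched so the ADC lookup can be observed + # without touching the real environment; the assertion also pins how + # explicit scopes and the service's default scopes are forwarded.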
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterManagerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_manager_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterManagerClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_cluster_manager_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_cluster_manager_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ClusterManagerGrpcTransport, grpc_helpers), + (transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "container.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + scopes=["1", "2"], + default_host="container.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) +def test_cluster_manager_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + + +def test_cluster_manager_host_no_port(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com'), + ) + assert client.transport._host == 'container.googleapis.com:443' + + +def test_cluster_manager_host_with_port(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com:8000'), + ) + assert client.transport._host == 'container.googleapis.com:8000' + +def test_cluster_manager_grpc_transport_channel(): + channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. + transport = transports.ClusterManagerGrpcTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +def test_cluster_manager_grpc_asyncio_transport_channel(): + channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) + + # Check that channel is used if provided. 
+ transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) +def test_cluster_manager_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
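(A minimal sketch, not part of the generated patch, of the client-side mTLS wiring the preceding test exercises; the certificate file names and the cert-source helper are hypothetical. The deprecated-argument ADC variant follows below.)

.. code-block:: python

    # Illustrative sketch only: hand the transport a client certificate source
    # so it builds an mTLS channel itself; credentials fall back to ADC.
    from google.container_v1 import ClusterManagerClient
    from google.container_v1.services.cluster_manager import transports

    def my_cert_source():
        # Hypothetical helper returning (certificate_chain_bytes, private_key_bytes).
        with open("client_cert.pem", "rb") as cert, open("client_key.pem", "rb") as key:
            return cert.read(), key.read()

    transport = transports.ClusterManagerGrpcTransport(
        client_cert_source_for_mtls=my_cert_source,
    )
    client = ClusterManagerClient(transport=transport)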
+@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) +def test_cluster_manager_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ClusterManagerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ClusterManagerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ClusterManagerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ClusterManagerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ClusterManagerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ClusterManagerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ClusterManagerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ClusterManagerClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ClusterManagerClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ClusterManagerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ClusterManagerClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: + transport_class = ClusterManagerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc new file mode 100644 index 00000000..f0a87b59 --- /dev/null +++ b/owl-bot-staging/v1beta1/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/container/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in new file mode 100644 index 00000000..36b8dd0a --- /dev/null +++ b/owl-bot-staging/v1beta1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/container *.py +recursive-include google/container_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst new file mode 100644 index 00000000..83d9858c --- /dev/null +++ b/owl-bot-staging/v1beta1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Container API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Container API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. 
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+    python3 -m venv <your-env>
+    <your-env>\Scripts\activate
+    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py
new file mode 100644
index 00000000..1f19408e
--- /dev/null
+++ b/owl-bot-staging/v1beta1/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-container documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "1.6.3"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.todo",
+    "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = "index"
+
+# General information about the project.
+project = u"google-container"
+copyright = u"2020, Google, LLC"
+author = u"Google APIs"  # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+    "description": "Google Client Libraries for Python",
+    "github_user": "googleapis",
+    "github_repo": "google-cloud-python",
+    "github_banner": True,
+    "font_family": "'Roboto', Georgia, sans",
+    "head_font_family": "'Roboto', Georgia, serif",
+    "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-container-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+    # Temporarily suppress this to avoid "more than one target found for
+    # cross-reference" warnings, which are intractable for us to avoid while in
+    # a mono-repo.
+    # See https://github.com/sphinx-doc/sphinx/blob
+    # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+    "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    # 'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    # 'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    # 'preamble': '',
+    # Latex figure (float) alignment
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [ + ( + master_doc, + "google-container.tex", + u"google-container Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + master_doc, + "google-container", + u"Google Container Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-container", + u"google-container Documentation", + author, + "google-container", + "GAPIC library for Google Container API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst b/owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst new file mode 100644 index 00000000..9a9600fb --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst @@ -0,0 +1,10 @@ +ClusterManager +-------------------------------- + +.. automodule:: google.container_v1beta1.services.cluster_manager + :members: + :inherited-members: + +.. 
automodule:: google.container_v1beta1.services.cluster_manager.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst new file mode 100644 index 00000000..18ed4869 --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst @@ -0,0 +1,6 @@ +Services for Google Container v1beta1 API +========================================= +.. toctree:: + :maxdepth: 2 + + cluster_manager diff --git a/owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst new file mode 100644 index 00000000..053b05fb --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst @@ -0,0 +1,7 @@ +Types for Google Container v1beta1 API +====================================== + +.. automodule:: google.container_v1beta1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst new file mode 100644 index 00000000..de07690b --- /dev/null +++ b/owl-bot-staging/v1beta1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + container_v1beta1/services + container_v1beta1/types diff --git a/owl-bot-staging/v1beta1/google/container/__init__.py b/owl-bot-staging/v1beta1/google/container/__init__.py new file mode 100644 index 00000000..22324c69 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container/__init__.py @@ -0,0 +1,249 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
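(The unversioned ``google.container`` package that follows is a thin alias layer: every client and message type is re-exported from ``google.container_v1beta1``. A minimal sketch, not part of the patch, of the resulting caller-facing surface; the project ID is hypothetical.)

.. code-block:: python

    # Illustrative only: the unversioned namespace resolves to the v1beta1 surface.
    from google.container import ClusterManagerClient, ListClustersRequest

    client = ClusterManagerClient()  # picks up Application Default Credentials
    request = ListClustersRequest(parent="projects/my-project/locations/-")
    response = client.list_clusters(request=request)
    for cluster in response.clusters:
        print(cluster.name)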
+# + +from google.container_v1beta1.services.cluster_manager.client import ClusterManagerClient +from google.container_v1beta1.services.cluster_manager.async_client import ClusterManagerAsyncClient + +from google.container_v1beta1.types.cluster_service import AcceleratorConfig +from google.container_v1beta1.types.cluster_service import AddonsConfig +from google.container_v1beta1.types.cluster_service import AuthenticatorGroupsConfig +from google.container_v1beta1.types.cluster_service import AutoprovisioningNodePoolDefaults +from google.container_v1beta1.types.cluster_service import AutoUpgradeOptions +from google.container_v1beta1.types.cluster_service import BinaryAuthorization +from google.container_v1beta1.types.cluster_service import CancelOperationRequest +from google.container_v1beta1.types.cluster_service import ClientCertificateConfig +from google.container_v1beta1.types.cluster_service import CloudRunConfig +from google.container_v1beta1.types.cluster_service import Cluster +from google.container_v1beta1.types.cluster_service import ClusterAutoscaling +from google.container_v1beta1.types.cluster_service import ClusterTelemetry +from google.container_v1beta1.types.cluster_service import ClusterUpdate +from google.container_v1beta1.types.cluster_service import CompleteIPRotationRequest +from google.container_v1beta1.types.cluster_service import ConfidentialNodes +from google.container_v1beta1.types.cluster_service import ConfigConnectorConfig +from google.container_v1beta1.types.cluster_service import CreateClusterRequest +from google.container_v1beta1.types.cluster_service import CreateNodePoolRequest +from google.container_v1beta1.types.cluster_service import DailyMaintenanceWindow +from google.container_v1beta1.types.cluster_service import DatabaseEncryption +from google.container_v1beta1.types.cluster_service import DefaultSnatStatus +from google.container_v1beta1.types.cluster_service import DeleteClusterRequest +from google.container_v1beta1.types.cluster_service import DeleteNodePoolRequest +from google.container_v1beta1.types.cluster_service import DnsCacheConfig +from google.container_v1beta1.types.cluster_service import EphemeralStorageConfig +from google.container_v1beta1.types.cluster_service import GcePersistentDiskCsiDriverConfig +from google.container_v1beta1.types.cluster_service import GetClusterRequest +from google.container_v1beta1.types.cluster_service import GetJSONWebKeysRequest +from google.container_v1beta1.types.cluster_service import GetJSONWebKeysResponse +from google.container_v1beta1.types.cluster_service import GetNodePoolRequest +from google.container_v1beta1.types.cluster_service import GetOpenIDConfigRequest +from google.container_v1beta1.types.cluster_service import GetOpenIDConfigResponse +from google.container_v1beta1.types.cluster_service import GetOperationRequest +from google.container_v1beta1.types.cluster_service import GetServerConfigRequest +from google.container_v1beta1.types.cluster_service import HorizontalPodAutoscaling +from google.container_v1beta1.types.cluster_service import HttpLoadBalancing +from google.container_v1beta1.types.cluster_service import IntraNodeVisibilityConfig +from google.container_v1beta1.types.cluster_service import IPAllocationPolicy +from google.container_v1beta1.types.cluster_service import IstioConfig +from google.container_v1beta1.types.cluster_service import Jwk +from google.container_v1beta1.types.cluster_service import KalmConfig +from google.container_v1beta1.types.cluster_service import 
KubernetesDashboard +from google.container_v1beta1.types.cluster_service import LegacyAbac +from google.container_v1beta1.types.cluster_service import LinuxNodeConfig +from google.container_v1beta1.types.cluster_service import ListClustersRequest +from google.container_v1beta1.types.cluster_service import ListClustersResponse +from google.container_v1beta1.types.cluster_service import ListLocationsRequest +from google.container_v1beta1.types.cluster_service import ListLocationsResponse +from google.container_v1beta1.types.cluster_service import ListNodePoolsRequest +from google.container_v1beta1.types.cluster_service import ListNodePoolsResponse +from google.container_v1beta1.types.cluster_service import ListOperationsRequest +from google.container_v1beta1.types.cluster_service import ListOperationsResponse +from google.container_v1beta1.types.cluster_service import ListUsableSubnetworksRequest +from google.container_v1beta1.types.cluster_service import ListUsableSubnetworksResponse +from google.container_v1beta1.types.cluster_service import Location +from google.container_v1beta1.types.cluster_service import MaintenancePolicy +from google.container_v1beta1.types.cluster_service import MaintenanceWindow +from google.container_v1beta1.types.cluster_service import Master +from google.container_v1beta1.types.cluster_service import MasterAuth +from google.container_v1beta1.types.cluster_service import MasterAuthorizedNetworksConfig +from google.container_v1beta1.types.cluster_service import MaxPodsConstraint +from google.container_v1beta1.types.cluster_service import NetworkConfig +from google.container_v1beta1.types.cluster_service import NetworkPolicy +from google.container_v1beta1.types.cluster_service import NetworkPolicyConfig +from google.container_v1beta1.types.cluster_service import NodeConfig +from google.container_v1beta1.types.cluster_service import NodeKubeletConfig +from google.container_v1beta1.types.cluster_service import NodeManagement +from google.container_v1beta1.types.cluster_service import NodePool +from google.container_v1beta1.types.cluster_service import NodePoolAutoscaling +from google.container_v1beta1.types.cluster_service import NodeTaint +from google.container_v1beta1.types.cluster_service import NotificationConfig +from google.container_v1beta1.types.cluster_service import Operation +from google.container_v1beta1.types.cluster_service import OperationProgress +from google.container_v1beta1.types.cluster_service import PodSecurityPolicyConfig +from google.container_v1beta1.types.cluster_service import PrivateClusterConfig +from google.container_v1beta1.types.cluster_service import PrivateClusterMasterGlobalAccessConfig +from google.container_v1beta1.types.cluster_service import RecurringTimeWindow +from google.container_v1beta1.types.cluster_service import ReleaseChannel +from google.container_v1beta1.types.cluster_service import ReservationAffinity +from google.container_v1beta1.types.cluster_service import ResourceLimit +from google.container_v1beta1.types.cluster_service import ResourceUsageExportConfig +from google.container_v1beta1.types.cluster_service import RollbackNodePoolUpgradeRequest +from google.container_v1beta1.types.cluster_service import SandboxConfig +from google.container_v1beta1.types.cluster_service import ServerConfig +from google.container_v1beta1.types.cluster_service import SetAddonsConfigRequest +from google.container_v1beta1.types.cluster_service import SetLabelsRequest +from google.container_v1beta1.types.cluster_service import 
SetLegacyAbacRequest +from google.container_v1beta1.types.cluster_service import SetLocationsRequest +from google.container_v1beta1.types.cluster_service import SetLoggingServiceRequest +from google.container_v1beta1.types.cluster_service import SetMaintenancePolicyRequest +from google.container_v1beta1.types.cluster_service import SetMasterAuthRequest +from google.container_v1beta1.types.cluster_service import SetMonitoringServiceRequest +from google.container_v1beta1.types.cluster_service import SetNetworkPolicyRequest +from google.container_v1beta1.types.cluster_service import SetNodePoolAutoscalingRequest +from google.container_v1beta1.types.cluster_service import SetNodePoolManagementRequest +from google.container_v1beta1.types.cluster_service import SetNodePoolSizeRequest +from google.container_v1beta1.types.cluster_service import ShieldedInstanceConfig +from google.container_v1beta1.types.cluster_service import ShieldedNodes +from google.container_v1beta1.types.cluster_service import StartIPRotationRequest +from google.container_v1beta1.types.cluster_service import StatusCondition +from google.container_v1beta1.types.cluster_service import TimeWindow +from google.container_v1beta1.types.cluster_service import TpuConfig +from google.container_v1beta1.types.cluster_service import UpdateClusterRequest +from google.container_v1beta1.types.cluster_service import UpdateMasterRequest +from google.container_v1beta1.types.cluster_service import UpdateNodePoolRequest +from google.container_v1beta1.types.cluster_service import UpgradeEvent +from google.container_v1beta1.types.cluster_service import UsableSubnetwork +from google.container_v1beta1.types.cluster_service import UsableSubnetworkSecondaryRange +from google.container_v1beta1.types.cluster_service import VerticalPodAutoscaling +from google.container_v1beta1.types.cluster_service import WorkloadIdentityConfig +from google.container_v1beta1.types.cluster_service import WorkloadMetadataConfig +from google.container_v1beta1.types.cluster_service import DatapathProvider +from google.container_v1beta1.types.cluster_service import UpgradeResourceType + +__all__ = ('ClusterManagerClient', + 'ClusterManagerAsyncClient', + 'AcceleratorConfig', + 'AddonsConfig', + 'AuthenticatorGroupsConfig', + 'AutoprovisioningNodePoolDefaults', + 'AutoUpgradeOptions', + 'BinaryAuthorization', + 'CancelOperationRequest', + 'ClientCertificateConfig', + 'CloudRunConfig', + 'Cluster', + 'ClusterAutoscaling', + 'ClusterTelemetry', + 'ClusterUpdate', + 'CompleteIPRotationRequest', + 'ConfidentialNodes', + 'ConfigConnectorConfig', + 'CreateClusterRequest', + 'CreateNodePoolRequest', + 'DailyMaintenanceWindow', + 'DatabaseEncryption', + 'DefaultSnatStatus', + 'DeleteClusterRequest', + 'DeleteNodePoolRequest', + 'DnsCacheConfig', + 'EphemeralStorageConfig', + 'GcePersistentDiskCsiDriverConfig', + 'GetClusterRequest', + 'GetJSONWebKeysRequest', + 'GetJSONWebKeysResponse', + 'GetNodePoolRequest', + 'GetOpenIDConfigRequest', + 'GetOpenIDConfigResponse', + 'GetOperationRequest', + 'GetServerConfigRequest', + 'HorizontalPodAutoscaling', + 'HttpLoadBalancing', + 'IntraNodeVisibilityConfig', + 'IPAllocationPolicy', + 'IstioConfig', + 'Jwk', + 'KalmConfig', + 'KubernetesDashboard', + 'LegacyAbac', + 'LinuxNodeConfig', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListLocationsRequest', + 'ListLocationsResponse', + 'ListNodePoolsRequest', + 'ListNodePoolsResponse', + 'ListOperationsRequest', + 'ListOperationsResponse', + 'ListUsableSubnetworksRequest', + 
'ListUsableSubnetworksResponse', + 'Location', + 'MaintenancePolicy', + 'MaintenanceWindow', + 'Master', + 'MasterAuth', + 'MasterAuthorizedNetworksConfig', + 'MaxPodsConstraint', + 'NetworkConfig', + 'NetworkPolicy', + 'NetworkPolicyConfig', + 'NodeConfig', + 'NodeKubeletConfig', + 'NodeManagement', + 'NodePool', + 'NodePoolAutoscaling', + 'NodeTaint', + 'NotificationConfig', + 'Operation', + 'OperationProgress', + 'PodSecurityPolicyConfig', + 'PrivateClusterConfig', + 'PrivateClusterMasterGlobalAccessConfig', + 'RecurringTimeWindow', + 'ReleaseChannel', + 'ReservationAffinity', + 'ResourceLimit', + 'ResourceUsageExportConfig', + 'RollbackNodePoolUpgradeRequest', + 'SandboxConfig', + 'ServerConfig', + 'SetAddonsConfigRequest', + 'SetLabelsRequest', + 'SetLegacyAbacRequest', + 'SetLocationsRequest', + 'SetLoggingServiceRequest', + 'SetMaintenancePolicyRequest', + 'SetMasterAuthRequest', + 'SetMonitoringServiceRequest', + 'SetNetworkPolicyRequest', + 'SetNodePoolAutoscalingRequest', + 'SetNodePoolManagementRequest', + 'SetNodePoolSizeRequest', + 'ShieldedInstanceConfig', + 'ShieldedNodes', + 'StartIPRotationRequest', + 'StatusCondition', + 'TimeWindow', + 'TpuConfig', + 'UpdateClusterRequest', + 'UpdateMasterRequest', + 'UpdateNodePoolRequest', + 'UpgradeEvent', + 'UsableSubnetwork', + 'UsableSubnetworkSecondaryRange', + 'VerticalPodAutoscaling', + 'WorkloadIdentityConfig', + 'WorkloadMetadataConfig', + 'DatapathProvider', + 'UpgradeResourceType', +) diff --git a/owl-bot-staging/v1beta1/google/container/py.typed b/owl-bot-staging/v1beta1/google/container/py.typed new file mode 100644 index 00000000..fd835114 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-container package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py new file mode 100644 index 00000000..81480c82 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py @@ -0,0 +1,250 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
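(The versioned ``google.container_v1beta1`` package below exposes the same surface plus the async client. A minimal sketch, not part of the patch, of the async variant; the project ID is hypothetical, and ``zone="-"`` is the documented wildcard for all locations.)

.. code-block:: python

    # Illustrative only: the async client mirrors the sync method set.
    import asyncio
    from google.container_v1beta1 import ClusterManagerAsyncClient

    async def main():
        client = ClusterManagerAsyncClient()
        response = await client.list_clusters(project_id="my-project", zone="-")
        for cluster in response.clusters:
            print(cluster.name)

    asyncio.run(main())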
+# + +from .services.cluster_manager import ClusterManagerClient +from .services.cluster_manager import ClusterManagerAsyncClient + +from .types.cluster_service import AcceleratorConfig +from .types.cluster_service import AddonsConfig +from .types.cluster_service import AuthenticatorGroupsConfig +from .types.cluster_service import AutoprovisioningNodePoolDefaults +from .types.cluster_service import AutoUpgradeOptions +from .types.cluster_service import BinaryAuthorization +from .types.cluster_service import CancelOperationRequest +from .types.cluster_service import ClientCertificateConfig +from .types.cluster_service import CloudRunConfig +from .types.cluster_service import Cluster +from .types.cluster_service import ClusterAutoscaling +from .types.cluster_service import ClusterTelemetry +from .types.cluster_service import ClusterUpdate +from .types.cluster_service import CompleteIPRotationRequest +from .types.cluster_service import ConfidentialNodes +from .types.cluster_service import ConfigConnectorConfig +from .types.cluster_service import CreateClusterRequest +from .types.cluster_service import CreateNodePoolRequest +from .types.cluster_service import DailyMaintenanceWindow +from .types.cluster_service import DatabaseEncryption +from .types.cluster_service import DefaultSnatStatus +from .types.cluster_service import DeleteClusterRequest +from .types.cluster_service import DeleteNodePoolRequest +from .types.cluster_service import DnsCacheConfig +from .types.cluster_service import EphemeralStorageConfig +from .types.cluster_service import GcePersistentDiskCsiDriverConfig +from .types.cluster_service import GetClusterRequest +from .types.cluster_service import GetJSONWebKeysRequest +from .types.cluster_service import GetJSONWebKeysResponse +from .types.cluster_service import GetNodePoolRequest +from .types.cluster_service import GetOpenIDConfigRequest +from .types.cluster_service import GetOpenIDConfigResponse +from .types.cluster_service import GetOperationRequest +from .types.cluster_service import GetServerConfigRequest +from .types.cluster_service import HorizontalPodAutoscaling +from .types.cluster_service import HttpLoadBalancing +from .types.cluster_service import IntraNodeVisibilityConfig +from .types.cluster_service import IPAllocationPolicy +from .types.cluster_service import IstioConfig +from .types.cluster_service import Jwk +from .types.cluster_service import KalmConfig +from .types.cluster_service import KubernetesDashboard +from .types.cluster_service import LegacyAbac +from .types.cluster_service import LinuxNodeConfig +from .types.cluster_service import ListClustersRequest +from .types.cluster_service import ListClustersResponse +from .types.cluster_service import ListLocationsRequest +from .types.cluster_service import ListLocationsResponse +from .types.cluster_service import ListNodePoolsRequest +from .types.cluster_service import ListNodePoolsResponse +from .types.cluster_service import ListOperationsRequest +from .types.cluster_service import ListOperationsResponse +from .types.cluster_service import ListUsableSubnetworksRequest +from .types.cluster_service import ListUsableSubnetworksResponse +from .types.cluster_service import Location +from .types.cluster_service import MaintenancePolicy +from .types.cluster_service import MaintenanceWindow +from .types.cluster_service import Master +from .types.cluster_service import MasterAuth +from .types.cluster_service import MasterAuthorizedNetworksConfig +from .types.cluster_service import MaxPodsConstraint +from 
.types.cluster_service import NetworkConfig +from .types.cluster_service import NetworkPolicy +from .types.cluster_service import NetworkPolicyConfig +from .types.cluster_service import NodeConfig +from .types.cluster_service import NodeKubeletConfig +from .types.cluster_service import NodeManagement +from .types.cluster_service import NodePool +from .types.cluster_service import NodePoolAutoscaling +from .types.cluster_service import NodeTaint +from .types.cluster_service import NotificationConfig +from .types.cluster_service import Operation +from .types.cluster_service import OperationProgress +from .types.cluster_service import PodSecurityPolicyConfig +from .types.cluster_service import PrivateClusterConfig +from .types.cluster_service import PrivateClusterMasterGlobalAccessConfig +from .types.cluster_service import RecurringTimeWindow +from .types.cluster_service import ReleaseChannel +from .types.cluster_service import ReservationAffinity +from .types.cluster_service import ResourceLimit +from .types.cluster_service import ResourceUsageExportConfig +from .types.cluster_service import RollbackNodePoolUpgradeRequest +from .types.cluster_service import SandboxConfig +from .types.cluster_service import ServerConfig +from .types.cluster_service import SetAddonsConfigRequest +from .types.cluster_service import SetLabelsRequest +from .types.cluster_service import SetLegacyAbacRequest +from .types.cluster_service import SetLocationsRequest +from .types.cluster_service import SetLoggingServiceRequest +from .types.cluster_service import SetMaintenancePolicyRequest +from .types.cluster_service import SetMasterAuthRequest +from .types.cluster_service import SetMonitoringServiceRequest +from .types.cluster_service import SetNetworkPolicyRequest +from .types.cluster_service import SetNodePoolAutoscalingRequest +from .types.cluster_service import SetNodePoolManagementRequest +from .types.cluster_service import SetNodePoolSizeRequest +from .types.cluster_service import ShieldedInstanceConfig +from .types.cluster_service import ShieldedNodes +from .types.cluster_service import StartIPRotationRequest +from .types.cluster_service import StatusCondition +from .types.cluster_service import TimeWindow +from .types.cluster_service import TpuConfig +from .types.cluster_service import UpdateClusterRequest +from .types.cluster_service import UpdateMasterRequest +from .types.cluster_service import UpdateNodePoolRequest +from .types.cluster_service import UpgradeEvent +from .types.cluster_service import UsableSubnetwork +from .types.cluster_service import UsableSubnetworkSecondaryRange +from .types.cluster_service import VerticalPodAutoscaling +from .types.cluster_service import WorkloadIdentityConfig +from .types.cluster_service import WorkloadMetadataConfig +from .types.cluster_service import DatapathProvider +from .types.cluster_service import UpgradeResourceType + +__all__ = ( + 'ClusterManagerAsyncClient', +'AcceleratorConfig', +'AddonsConfig', +'AuthenticatorGroupsConfig', +'AutoUpgradeOptions', +'AutoprovisioningNodePoolDefaults', +'BinaryAuthorization', +'CancelOperationRequest', +'ClientCertificateConfig', +'CloudRunConfig', +'Cluster', +'ClusterAutoscaling', +'ClusterManagerClient', +'ClusterTelemetry', +'ClusterUpdate', +'CompleteIPRotationRequest', +'ConfidentialNodes', +'ConfigConnectorConfig', +'CreateClusterRequest', +'CreateNodePoolRequest', +'DailyMaintenanceWindow', +'DatabaseEncryption', +'DatapathProvider', +'DefaultSnatStatus', +'DeleteClusterRequest', +'DeleteNodePoolRequest', 
+'DnsCacheConfig', +'EphemeralStorageConfig', +'GcePersistentDiskCsiDriverConfig', +'GetClusterRequest', +'GetJSONWebKeysRequest', +'GetJSONWebKeysResponse', +'GetNodePoolRequest', +'GetOpenIDConfigRequest', +'GetOpenIDConfigResponse', +'GetOperationRequest', +'GetServerConfigRequest', +'HorizontalPodAutoscaling', +'HttpLoadBalancing', +'IPAllocationPolicy', +'IntraNodeVisibilityConfig', +'IstioConfig', +'Jwk', +'KalmConfig', +'KubernetesDashboard', +'LegacyAbac', +'LinuxNodeConfig', +'ListClustersRequest', +'ListClustersResponse', +'ListLocationsRequest', +'ListLocationsResponse', +'ListNodePoolsRequest', +'ListNodePoolsResponse', +'ListOperationsRequest', +'ListOperationsResponse', +'ListUsableSubnetworksRequest', +'ListUsableSubnetworksResponse', +'Location', +'MaintenancePolicy', +'MaintenanceWindow', +'Master', +'MasterAuth', +'MasterAuthorizedNetworksConfig', +'MaxPodsConstraint', +'NetworkConfig', +'NetworkPolicy', +'NetworkPolicyConfig', +'NodeConfig', +'NodeKubeletConfig', +'NodeManagement', +'NodePool', +'NodePoolAutoscaling', +'NodeTaint', +'NotificationConfig', +'Operation', +'OperationProgress', +'PodSecurityPolicyConfig', +'PrivateClusterConfig', +'PrivateClusterMasterGlobalAccessConfig', +'RecurringTimeWindow', +'ReleaseChannel', +'ReservationAffinity', +'ResourceLimit', +'ResourceUsageExportConfig', +'RollbackNodePoolUpgradeRequest', +'SandboxConfig', +'ServerConfig', +'SetAddonsConfigRequest', +'SetLabelsRequest', +'SetLegacyAbacRequest', +'SetLocationsRequest', +'SetLoggingServiceRequest', +'SetMaintenancePolicyRequest', +'SetMasterAuthRequest', +'SetMonitoringServiceRequest', +'SetNetworkPolicyRequest', +'SetNodePoolAutoscalingRequest', +'SetNodePoolManagementRequest', +'SetNodePoolSizeRequest', +'ShieldedInstanceConfig', +'ShieldedNodes', +'StartIPRotationRequest', +'StatusCondition', +'TimeWindow', +'TpuConfig', +'UpdateClusterRequest', +'UpdateMasterRequest', +'UpdateNodePoolRequest', +'UpgradeEvent', +'UpgradeResourceType', +'UsableSubnetwork', +'UsableSubnetworkSecondaryRange', +'VerticalPodAutoscaling', +'WorkloadIdentityConfig', +'WorkloadMetadataConfig', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json new file mode 100644 index 00000000..0ff8e5d5 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json @@ -0,0 +1,353 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.container_v1beta1", + "protoPackage": "google.container.v1beta1", + "schema": "1.0", + "services": { + "ClusterManager": { + "clients": { + "grpc": { + "libraryClient": "ClusterManagerClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "cancel_operation" + ] + }, + "CompleteIPRotation": { + "methods": [ + "complete_ip_rotation" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateNodePool": { + "methods": [ + "create_node_pool" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteNodePool": { + "methods": [ + "delete_node_pool" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetJSONWebKeys": { + "methods": [ + "get_json_web_keys" + ] + }, + "GetNodePool": { + "methods": [ + "get_node_pool" + ] + }, + "GetOperation": { + "methods": [ + "get_operation" + ] + }, + "GetServerConfig": { + "methods": [ + "get_server_config" + ] + }, + "ListClusters": { + "methods": [ + 
"list_clusters" + ] + }, + "ListLocations": { + "methods": [ + "list_locations" + ] + }, + "ListNodePools": { + "methods": [ + "list_node_pools" + ] + }, + "ListOperations": { + "methods": [ + "list_operations" + ] + }, + "ListUsableSubnetworks": { + "methods": [ + "list_usable_subnetworks" + ] + }, + "RollbackNodePoolUpgrade": { + "methods": [ + "rollback_node_pool_upgrade" + ] + }, + "SetAddonsConfig": { + "methods": [ + "set_addons_config" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetLegacyAbac": { + "methods": [ + "set_legacy_abac" + ] + }, + "SetLocations": { + "methods": [ + "set_locations" + ] + }, + "SetLoggingService": { + "methods": [ + "set_logging_service" + ] + }, + "SetMaintenancePolicy": { + "methods": [ + "set_maintenance_policy" + ] + }, + "SetMasterAuth": { + "methods": [ + "set_master_auth" + ] + }, + "SetMonitoringService": { + "methods": [ + "set_monitoring_service" + ] + }, + "SetNetworkPolicy": { + "methods": [ + "set_network_policy" + ] + }, + "SetNodePoolAutoscaling": { + "methods": [ + "set_node_pool_autoscaling" + ] + }, + "SetNodePoolManagement": { + "methods": [ + "set_node_pool_management" + ] + }, + "SetNodePoolSize": { + "methods": [ + "set_node_pool_size" + ] + }, + "StartIPRotation": { + "methods": [ + "start_ip_rotation" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateMaster": { + "methods": [ + "update_master" + ] + }, + "UpdateNodePool": { + "methods": [ + "update_node_pool" + ] + } + } + }, + "grpc-async": { + "libraryClient": "ClusterManagerAsyncClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "cancel_operation" + ] + }, + "CompleteIPRotation": { + "methods": [ + "complete_ip_rotation" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateNodePool": { + "methods": [ + "create_node_pool" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteNodePool": { + "methods": [ + "delete_node_pool" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetJSONWebKeys": { + "methods": [ + "get_json_web_keys" + ] + }, + "GetNodePool": { + "methods": [ + "get_node_pool" + ] + }, + "GetOperation": { + "methods": [ + "get_operation" + ] + }, + "GetServerConfig": { + "methods": [ + "get_server_config" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListLocations": { + "methods": [ + "list_locations" + ] + }, + "ListNodePools": { + "methods": [ + "list_node_pools" + ] + }, + "ListOperations": { + "methods": [ + "list_operations" + ] + }, + "ListUsableSubnetworks": { + "methods": [ + "list_usable_subnetworks" + ] + }, + "RollbackNodePoolUpgrade": { + "methods": [ + "rollback_node_pool_upgrade" + ] + }, + "SetAddonsConfig": { + "methods": [ + "set_addons_config" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetLegacyAbac": { + "methods": [ + "set_legacy_abac" + ] + }, + "SetLocations": { + "methods": [ + "set_locations" + ] + }, + "SetLoggingService": { + "methods": [ + "set_logging_service" + ] + }, + "SetMaintenancePolicy": { + "methods": [ + "set_maintenance_policy" + ] + }, + "SetMasterAuth": { + "methods": [ + "set_master_auth" + ] + }, + "SetMonitoringService": { + "methods": [ + "set_monitoring_service" + ] + }, + "SetNetworkPolicy": { + "methods": [ + "set_network_policy" + ] + }, + "SetNodePoolAutoscaling": { + "methods": [ + "set_node_pool_autoscaling" + ] + }, + "SetNodePoolManagement": { + "methods": [ + "set_node_pool_management" + ] + }, + "SetNodePoolSize": { + 
"methods": [ + "set_node_pool_size" + ] + }, + "StartIPRotation": { + "methods": [ + "start_ip_rotation" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateMaster": { + "methods": [ + "update_master" + ] + }, + "UpdateNodePool": { + "methods": [ + "update_node_pool" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/container_v1beta1/py.typed new file mode 100644 index 00000000..fd835114 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-container package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py new file mode 100644 index 00000000..4de65971 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py new file mode 100644 index 00000000..490efad3 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ClusterManagerClient +from .async_client import ClusterManagerAsyncClient + +__all__ = ( + 'ClusterManagerClient', + 'ClusterManagerAsyncClient', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py new file mode 100644 index 00000000..14848aa7 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py @@ -0,0 +1,3632 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Sequence, Tuple, Type, Union +import pkg_resources +import warnings + +import google.api_core.client_options as ClientOptions # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.container_v1beta1.services.cluster_manager import pagers +from google.container_v1beta1.types import cluster_service +from google.rpc import status_pb2 # type: ignore +from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport +from .client import ClusterManagerClient + + +class ClusterManagerAsyncClient: + """Google Kubernetes Engine Cluster Manager v1beta1""" + + _client: ClusterManagerClient + + DEFAULT_ENDPOINT = ClusterManagerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = ClusterManagerClient.DEFAULT_MTLS_ENDPOINT + + topic_path = staticmethod(ClusterManagerClient.topic_path) + parse_topic_path = staticmethod(ClusterManagerClient.parse_topic_path) + common_billing_account_path = staticmethod(ClusterManagerClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(ClusterManagerClient.parse_common_billing_account_path) + common_folder_path = staticmethod(ClusterManagerClient.common_folder_path) + parse_common_folder_path = staticmethod(ClusterManagerClient.parse_common_folder_path) + common_organization_path = staticmethod(ClusterManagerClient.common_organization_path) + parse_common_organization_path = staticmethod(ClusterManagerClient.parse_common_organization_path) + common_project_path = staticmethod(ClusterManagerClient.common_project_path) + parse_common_project_path = staticmethod(ClusterManagerClient.parse_common_project_path) + common_location_path = staticmethod(ClusterManagerClient.common_location_path) + parse_common_location_path = staticmethod(ClusterManagerClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterManagerAsyncClient: The constructed client. + """ + return ClusterManagerClient.from_service_account_info.__func__(ClusterManagerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. 
+ + Returns: + ClusterManagerAsyncClient: The constructed client. + """ + return ClusterManagerClient.from_service_account_file.__func__(ClusterManagerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterManagerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterManagerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(ClusterManagerClient).get_transport_class, type(ClusterManagerClient)) + + def __init__(self, *, + credentials: ga_credentials.Credentials = None, + transport: Union[str, ClusterManagerTransport] = "grpc_asyncio", + client_options: ClientOptions = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = ClusterManagerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def list_clusters(self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (:class:`google.container_v1beta1.types.ListClustersRequest`): + The request object. ListClustersRequest lists clusters. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute + Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_cluster(self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details for a specific cluster. + + Args: + request (:class:`google.container_v1beta1.types.GetClusterRequest`): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to retrieve. This field has been + deprecated and replaced by the name + field. 
+ + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_cluster(self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (:class:`google.container_v1beta1.types.CreateClusterRequest`): + The request object. CreateClusterRequest creates a + cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. 
This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.container_v1beta1.types.Cluster`): + Required. A `cluster + resource `__ + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_cluster(self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings for a specific cluster. + + Args: + request (:class:`google.container_v1beta1.types.UpdateClusterRequest`): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. 
This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (:class:`google.container_v1beta1.types.ClusterUpdate`): + Required. A description of the + update. + + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, update]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.UpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_node_pool(self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type of a specific + node pool. + + Args: + request (:class:`google.container_v1beta1.types.UpdateNodePoolRequest`): + The request object. SetNodePoolVersionRequest updates + the version of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
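+        # Note: wrap_method() binds the default timeout and client info
+        # below onto the bare transport method; a per-call ``retry`` or
+        # ``timeout`` argument passed to this coroutine still overrides
+        # those defaults.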
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_node_pool, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_node_pool_autoscaling(self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings of a specific node + pool. + + Args: + request (:class:`google.container_v1beta1.types.SetNodePoolAutoscalingRequest`): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_autoscaling, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_logging_service(self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetLoggingServiceRequest`): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. 
The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (:class:`str`): + Required. The logging service the cluster should use to + write logs. Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud + Logging service with a Kubernetes-native resource + model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``logging.googleapis.com`` for + earlier versions. + + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, logging_service]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLoggingServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_logging_service, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_monitoring_service(self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetMonitoringServiceRequest`): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (:class:`str`): + Required. Deprecated. 
The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (:class:`str`): + Required. The monitoring service the cluster should use + to write metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE + 1.15). + - ``none`` - No metrics will be exported from the + cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will + be used for GKE 1.14+ or ``monitoring.googleapis.com`` + for earlier versions. + + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, monitoring_service]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetMonitoringServiceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_monitoring_service, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
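+        # The returned Operation is a long-running server-side operation;
+        # callers typically poll it with get_operation() until its status
+        # reaches DONE.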
+ return response + + async def set_addons_config(self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetAddonsConfigRequest`): + The request object. SetAddonsRequest sets the addons + associated with the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (:class:`google.container_v1beta1.types.AddonsConfig`): + Required. The desired configurations + for the various addons available to run + in the cluster. + + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, addons_config]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetAddonsConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_addons_config, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
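+        # to_grpc_metadata() below emits the ``x-goog-request-params``
+        # routing header so the service can route the call by the
+        # cluster's resource name.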
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_locations(self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. Deprecated. Use + `projects.locations.clusters.update `__ + instead. + + Args: + request (:class:`google.container_v1beta1.types.SetLocationsRequest`): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (:class:`Sequence[str]`): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + warnings.warn("ClusterManagerAsyncClient.set_locations is deprecated", + DeprecationWarning) + + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, locations]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLocationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
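+        # ``locations`` is a repeated proto field, so it is extended
+        # below rather than assigned; the scalar fields are plain
+        # assignments.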
+ if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations: + request.locations.extend(locations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_locations, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_master(self, + request: cluster_service.UpdateMasterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + master_version: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the master for a specific cluster. + + Args: + request (:class:`google.container_v1beta1.types.UpdateMasterRequest`): + The request object. UpdateMasterRequest updates the + master of the cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + master_version (:class:`str`): + Required. The Kubernetes version to + change the master to. + Users may specify either explicit + versions offered by Kubernetes Engine or + version aliases, which have the + following behavior: + - "latest": picks the highest valid + Kubernetes version - "1.X": picks the + highest valid patch+gke.N patch in the + 1.X version - "1.X.Y": picks the highest + valid gke.N patch in the 1.X.Y version - + "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the + default Kubernetes version + + This corresponds to the ``master_version`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
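+        # The flattened arguments (project_id/zone/cluster_id/
+        # master_version) and the ``request`` object are mutually
+        # exclusive; mixing them raises the ValueError below.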
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project_id, zone, cluster_id, master_version])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        request = cluster_service.UpdateMasterRequest(request)
+
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project_id is not None:
+            request.project_id = project_id
+        if zone is not None:
+            request.zone = zone
+        if cluster_id is not None:
+            request.cluster_id = cluster_id
+        if master_version is not None:
+            request.master_version = master_version
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.update_master,
+            default_timeout=45.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def set_master_auth(self,
+            request: cluster_service.SetMasterAuthRequest = None,
+            *,
+            retry: retries.Retry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> cluster_service.Operation:
+        r"""Sets master auth materials. Currently supports
+        changing the admin password of a specific cluster,
+        either via password generation or explicitly setting the
+        password.
+
+        Args:
+            request (:class:`google.container_v1beta1.types.SetMasterAuthRequest`):
+                The request object. SetMasterAuthRequest updates the
+                admin password of a cluster.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.container_v1beta1.types.Operation:
+                This operation resource represents
+                operations that may have happened or are
+                happening on the cluster. All fields are
+                output only.
+
+        """
+        # Create or coerce a protobuf request object.
+        request = cluster_service.SetMasterAuthRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_master_auth,
+            default_timeout=45.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("name", request.name),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
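+        # Note: SetMasterAuthRequest has no flattened overloads; callers
+        # build the request directly, setting ``action`` (for example
+        # SET_PASSWORD or GENERATE_PASSWORD) and ``update`` with the new
+        # MasterAuth credentials.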
+ return response + + async def delete_cluster(self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. + + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (:class:`google.container_v1beta1.types.DeleteClusterRequest`): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
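+        # Unlike the non-retrying mutation RPCs above, delete_cluster is
+        # wrapped with the read-style retry policy (UNAVAILABLE and
+        # DEADLINE_EXCEEDED, 0.1s initial backoff, 1.3x multiplier,
+        # capped at 60s, 20s overall deadline).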
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_operations(self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in the specified + zone or all zones. + + Args: + request (:class:`google.container_v1beta1.types.ListOperationsRequest`): + The request object. ListOperationsRequest lists + operations. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListOperationsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_operations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
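+        # Unlike list_usable_subnetworks(), this method returns the
+        # complete ListOperationsResponse directly; the API does not
+        # paginate operations, so no pager is generated.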
+ return response + + async def get_operation(self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (:class:`google.container_v1beta1.types.GetOperationRequest`): + The request object. GetOperationRequest gets a single + operation. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
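+        # get_operation() is the polling primitive for the Operation
+        # objects returned by the mutation RPCs in this client.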
+ return response + + async def cancel_operation(self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (:class:`google.container_v1beta1.types.CancelOperationRequest`): + The request object. CancelOperationRequest cancels a + single operation. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (:class:`str`): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CancelOperationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.cancel_operation, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def get_server_config(self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (:class:`google.container_v1beta1.types.GetServerConfigRequest`): + The request object. 
Gets the current Kubernetes Engine + service configuration. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + to return operations for. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ServerConfig: + Kubernetes Engine service + configuration. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetServerConfigRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_server_config, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_node_pools(self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. + + Args: + request (:class:`google.container_v1beta1.types.ListNodePoolsRequest`): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ListNodePoolsResponse: + ListNodePoolsResponse is the result + of ListNodePoolsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListNodePoolsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_node_pools, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_json_web_keys(self, + request: cluster_service.GetJSONWebKeysRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.GetJSONWebKeysResponse: + r"""Gets the public component of the cluster signing keys + in JSON Web Key format. + This API is not yet intended for general use, and is not + available for all clusters. + + Args: + request (:class:`google.container_v1beta1.types.GetJSONWebKeysRequest`): + The request object. GetJSONWebKeysRequest gets the + public component of the keys used by the cluster to sign + token requests. This will be the jwks_uri for the + discovery document returned by getOpenIDConfig. See the + OpenID Connect Discovery 1.0 specification for details. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.GetJSONWebKeysResponse: + GetJSONWebKeysResponse is a valid + JSON Web Key Set as specified in RFC + 7517. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.GetJSONWebKeysRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_json_web_keys, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_node_pool(self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (:class:`google.container_v1beta1.types.GetNodePoolRequest`): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. + Node pools are a set of nodes (i.e. + VMs), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. + + """ + # Create or coerce a protobuf request object.
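+        # The two calling conventions are mutually exclusive. A hypothetical
+        # caller passes either a full request object or the flattened fields:
+        #
+        #   await client.get_node_pool(
+        #       request=cluster_service.GetNodePoolRequest(
+        #           name="projects/p/locations/z/clusters/c/nodePools/np",  # hypothetical
+        #       ),
+        #   )
+        #   # or, equivalently for zonal clusters:
+        #   await client.get_node_pool(
+        #       project_id="p", zone="z", cluster_id="c", node_pool_id="np",
+        #   )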
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.GetNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_node_pool(self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (:class:`google.container_v1beta1.types.CreateNodePoolRequest`): + The request object. CreateNodePoolRequest creates a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (:class:`google.container_v1beta1.types.NodePool`): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CreateNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_node_pool, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_node_pool(self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (:class:`google.container_v1beta1.types.DeleteNodePoolRequest`): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.DeleteNodePoolRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def rollback_node_pool_upgrade(self, + request: cluster_service.RollbackNodePoolUpgradeRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Args: + request (:class:`google.container_v1beta1.types.RollbackNodePoolUpgradeRequest`): + The request object. RollbackNodePoolUpgradeRequest + rolls back the previously Aborted or Failed NodePool + upgrade. This will be a no-op if the last upgrade + successfully completed. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set.
+ cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to rollback. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to rollback. This field has + been deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.rollback_node_pool_upgrade, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_node_pool_management(self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + management: cluster_service.NodeManagement = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (:class:`google.container_v1beta1.types.SetNodePoolManagementRequest`): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. 
The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (:class:`str`): + Required. Deprecated. The name of the + node pool to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + management (:class:`google.container_v1beta1.types.NodeManagement`): + Required. NodeManagement + configuration for the node pool. + + This corresponds to the ``management`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, management]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetNodePoolManagementRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if management is not None: + request.management = management + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_management, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
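+        # Illustrative example (hypothetical identifiers) enabling node
+        # auto-repair on an existing pool:
+        #
+        #   op = await client.set_node_pool_management(
+        #       project_id="my-project", zone="us-central1-a",
+        #       cluster_id="my-cluster", node_pool_id="default-pool",
+        #       management=cluster_service.NodeManagement(auto_repair=True),
+        #   )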
+ return response + + async def set_labels(self, + request: cluster_service.SetLabelsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + resource_labels: Sequence[cluster_service.SetLabelsRequest.ResourceLabelsEntry] = None, + label_fingerprint: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetLabelsRequest`): + The request object. SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_labels (:class:`Sequence[google.container_v1beta1.types.SetLabelsRequest.ResourceLabelsEntry]`): + Required. The labels to set for that + cluster. + + This corresponds to the ``resource_labels`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + label_fingerprint (:class:`str`): + Required. The fingerprint of the previous set of labels + for this resource, used to detect conflicts. The + fingerprint is initially generated by Kubernetes Engine + and changes after every request to modify or update + labels. You must always provide an up-to-date + fingerprint hash when updating or changing labels. Make + a ``get()`` request to the resource to get the latest + fingerprint. + + This corresponds to the ``label_fingerprint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
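+        # Labels use optimistic concurrency via the fingerprint. A hypothetical
+        # caller reads the cluster first, then writes with the fresh value:
+        #
+        #   cluster = await client.get_cluster(
+        #       request=cluster_service.GetClusterRequest(name=cluster_name),  # hypothetical name
+        #   )
+        #   await client.set_labels(
+        #       project_id="p", zone="z", cluster_id="c",
+        #       resource_labels={"env": "prod"},
+        #       label_fingerprint=cluster.label_fingerprint,
+        #   )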
+ has_flattened_params = any([project_id, zone, cluster_id, resource_labels, label_fingerprint]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLabelsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if label_fingerprint is not None: + request.label_fingerprint = label_fingerprint + + if resource_labels: + request.resource_labels.update(resource_labels) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_labels, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_legacy_abac(self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetLegacyAbacRequest`): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (:class:`bool`): + Required. Whether ABAC authorization + will be enabled in the cluster. + + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, enabled]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetLegacyAbacRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_legacy_abac, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def start_ip_rotation(self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (:class:`google.container_v1beta1.types.StartIPRotationRequest`): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.StartIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.start_ip_rotation, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def complete_ip_rotation(self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (:class:`google.container_v1beta1.types.CompleteIPRotationRequest`): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. 
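+        # IP rotation is a two-step flow; an illustrative sequence with
+        # hypothetical identifiers:
+        #
+        #   await client.start_ip_rotation(project_id="p", zone="z", cluster_id="c")
+        #   # ... node pools are recreated to point at the new master IP ...
+        #   await client.complete_ip_rotation(project_id="p", zone="z", cluster_id="c")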
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.CompleteIPRotationRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.complete_ip_rotation, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_node_pool_size(self, + request: cluster_service.SetNodePoolSizeRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the size for a specific node pool. + + Args: + request (:class:`google.container_v1beta1.types.SetNodePoolSizeRequest`): + The request object. SetNodePoolSizeRequest sets the size + of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + request = cluster_service.SetNodePoolSizeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_node_pool_size, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_network_policy(self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetNetworkPolicyRequest`): + The request object.
SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (:class:`str`): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (:class:`google.container_v1beta1.types.NetworkPolicy`): + Required. Configuration options for + the NetworkPolicy feature. + + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, network_policy]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetNetworkPolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_network_policy, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
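+        # Illustrative call (hypothetical identifiers) enabling the Calico
+        # network-policy provider:
+        #
+        #   op = await client.set_network_policy(
+        #       project_id="p", zone="z", cluster_id="c",
+        #       network_policy=cluster_service.NetworkPolicy(
+        #           provider=cluster_service.NetworkPolicy.Provider.CALICO,
+        #           enabled=True,
+        #       ),
+        #   )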
+ return response + + async def set_maintenance_policy(self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (:class:`google.container_v1beta1.types.SetMaintenancePolicyRequest`): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (:class:`str`): + Required. The Google Developers Console `project ID or + project + number `__. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (:class:`str`): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The name of the cluster to + update. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (:class:`google.container_v1beta1.types.MaintenancePolicy`): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.SetMaintenancePolicyRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if maintenance_policy is not None: + request.maintenance_policy = maintenance_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_maintenance_policy, + default_timeout=45.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
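+        # Illustrative request (hypothetical values); note that sending an
+        # empty policy clears any existing maintenance window:
+        #
+        #   policy = cluster_service.MaintenancePolicy(
+        #       window=cluster_service.MaintenanceWindow(
+        #           daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
+        #               start_time="03:00",
+        #           ),
+        #       ),
+        #   )
+        #   op = await client.set_maintenance_policy(
+        #       project_id="p", zone="z", cluster_id="c", maintenance_policy=policy,
+        #   )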
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_usable_subnetworks(self, + request: cluster_service.ListUsableSubnetworksRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsableSubnetworksAsyncPager: + r"""Lists subnetworks that can be used for creating + clusters in a project. + + Args: + request (:class:`google.container_v1beta1.types.ListUsableSubnetworksRequest`): + The request object. ListUsableSubnetworksRequest + requests the list of usable subnetworks available to a + user for creating clusters. + parent (:class:`str`): + Required. The parent project where subnetworks are + usable. Specified in the format ``projects/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.services.cluster_manager.pagers.ListUsableSubnetworksAsyncPager: + ListUsableSubnetworksResponse is the + response of + ListUsableSubnetworksRequest. + + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = cluster_service.ListUsableSubnetworksRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_usable_subnetworks, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListUsableSubnetworksAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response.
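+        # Illustrative iteration over the async pager (hypothetical parent):
+        #
+        #   pager = await client.list_usable_subnetworks(parent="projects/my-project")
+        #   async for subnetwork in pager:
+        #       print(subnetwork.subnetwork, subnetwork.ip_cidr_range)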
+ return response
+
+ async def list_locations(self,
+ request: cluster_service.ListLocationsRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.ListLocationsResponse:
+ r"""Fetches locations that offer Google Kubernetes
+ Engine.
+
+ Args:
+ request (:class:`google.container_v1beta1.types.ListLocationsRequest`):
+ The request object. ListLocationsRequest is used to
+ request the locations that offer GKE.
+ parent (:class:`str`):
+ Required. Contains the name of the resource requested.
+ Specified in the format ``projects/*``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.types.ListLocationsResponse:
+ ListLocationsResponse returns the
+ list of all GKE locations and their
+ recommendation state.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = cluster_service.ListLocationsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_locations,
+ default_retry=retries.Retry(
+ initial=0.1,
+ maximum=60.0,
+ multiplier=1.3,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=20.0,
+ ),
+ default_timeout=20.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution(
+ "google-container",
+ ).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = (
+ "ClusterManagerAsyncClient",
+)
diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py
new file mode 100644
index 00000000..2f6dfa92
--- /dev/null
+++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py
@@ -0,0 +1,3750 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+import warnings
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.container_v1beta1.services.cluster_manager import pagers
+from google.container_v1beta1.types import cluster_service
+from google.rpc import status_pb2 # type: ignore
+from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import ClusterManagerGrpcTransport
+from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport
+
+
+class ClusterManagerClientMeta(type):
+ """Metaclass for the ClusterManager client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+ _transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]]
+ _transport_registry["grpc"] = ClusterManagerGrpcTransport
+ _transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport
+
+ def get_transport_class(cls,
+ label: str = None,
+ ) -> Type[ClusterManagerTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
+ """Google Kubernetes Engine Cluster Manager v1beta1"""
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
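+ # The named groups decompose an endpoint such as
+ # "container.sandbox.googleapis.com" into the service name, an
+ # optional ".mtls" label, an optional ".sandbox" label, and the
+ # trailing ".googleapis.com" domain.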
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "container.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterManagerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ClusterManagerClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ClusterManagerTransport: + """Returns the transport used by the client instance. + + Returns: + ClusterManagerTransport: The transport used by the client + instance. 
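+
+ Example (an illustrative sketch; the concrete transport class
+ depends on how the client was constructed)::
+
+ client = ClusterManagerClient()
+ transport = client.transport # e.g. ClusterManagerGrpcTransport
+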
+ """ + return self._transport + + @staticmethod + def topic_path(project: str,topic: str,) -> str: + """Returns a fully-qualified topic string.""" + return "projects/{project}/topics/{topic}".format(project=project, topic=topic, ) + + @staticmethod + def parse_topic_path(path: str) -> Dict[str,str]: + """Parses a topic path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/topics/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ClusterManagerTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the cluster manager client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ClusterManagerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. 
+ (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ClusterManagerTransport): + # transport is a ClusterManagerTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), + ) + + def list_clusters(self, + request: cluster_service.ListClustersRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListClustersResponse: + r"""Lists all clusters owned by a project in either the + specified zone or all zones. + + Args: + request (google.container_v1beta1.types.ListClustersRequest): + The request object. ListClustersRequest lists clusters. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ListClustersResponse: + ListClustersResponse is the result of + ListClustersRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListClustersRequest): + request = cluster_service.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
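+ # The two calling styles this method supports are equivalent at this
+ # point (identifiers below are hypothetical):
+ #
+ # client.list_clusters(project_id="my-project", zone="-")
+ # client.list_clusters(request=cluster_service.ListClustersRequest(
+ # parent="projects/my-project/locations/-"))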
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_cluster(self, + request: cluster_service.GetClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Cluster: + r"""Gets the details for a specific cluster. + + Args: + request (google.container_v1beta1.types.GetClusterRequest): + The request object. GetClusterRequest gets the settings + of a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to retrieve. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Cluster: + A Google Kubernetes Engine cluster. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetClusterRequest): + request = cluster_service.GetClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
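+ # (`response` is a plain cluster_service.Cluster message; fields such
+ # as `response.name`, `response.status`, and `response.endpoint` can be
+ # read directly without further RPCs.)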
+ return response + + def create_cluster(self, + request: cluster_service.CreateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster: cluster_service.Cluster = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Args: + request (google.container_v1beta1.types.CreateClusterRequest): + The request object. CreateClusterRequest creates a + cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.container_v1beta1.types.Cluster): + Required. A `cluster + resource `__ + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateClusterRequest): + request = cluster_service.CreateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
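+ # `_wrapped_methods` maps each transport stub to a callable that was
+ # pre-wrapped (at transport construction time) with the default
+ # retry, timeout, and client-info metadata for that RPC.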
+ rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_cluster(self, + request: cluster_service.UpdateClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + update: cluster_service.ClusterUpdate = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the settings for a specific cluster. + + Args: + request (google.container_v1beta1.types.UpdateClusterRequest): + The request object. UpdateClusterRequest updates the + settings of a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update (google.container_v1beta1.types.ClusterUpdate): + Required. A description of the + update. + + This corresponds to the ``update`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, update]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateClusterRequest): + request = cluster_service.UpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
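+ # Note: assigning a message-valued field below (e.g.
+ # ``request.update = update``) copies the message into the request;
+ # later mutations of the caller's ClusterUpdate do not affect it.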
+ if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if update is not None: + request.update = update + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_node_pool(self, + request: cluster_service.UpdateNodePoolRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Updates the version and/or image type of a specific + node pool. + + Args: + request (google.container_v1beta1.types.UpdateNodePoolRequest): + The request object. SetNodePoolVersionRequest updates + the version of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.UpdateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.UpdateNodePoolRequest): + request = cluster_service.UpdateNodePoolRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_node_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_node_pool_autoscaling(self, + request: cluster_service.SetNodePoolAutoscalingRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the autoscaling settings of a specific node + pool. + + Args: + request (google.container_v1beta1.types.SetNodePoolAutoscalingRequest): + The request object. SetNodePoolAutoscalingRequest sets + the autoscaler settings of a node pool. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. 
All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolAutoscalingRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest): + request = cluster_service.SetNodePoolAutoscalingRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_autoscaling] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_logging_service(self, + request: cluster_service.SetLoggingServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + logging_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the logging service for a specific cluster. + + Args: + request (google.container_v1beta1.types.SetLoggingServiceRequest): + The request object. SetLoggingServiceRequest sets the + logging service of a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + logging_service (str): + Required. The logging service the cluster should use to + write logs. Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud + Logging service with a Kubernetes-native resource + model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``logging.googleapis.com`` for + earlier versions. + + This corresponds to the ``logging_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, logging_service]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLoggingServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLoggingServiceRequest): + request = cluster_service.SetLoggingServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if logging_service is not None: + request.logging_service = logging_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_logging_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_monitoring_service(self, + request: cluster_service.SetMonitoringServiceRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + monitoring_service: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the monitoring service for a specific cluster. + + Args: + request (google.container_v1beta1.types.SetMonitoringServiceRequest): + The request object. SetMonitoringServiceRequest sets the + monitoring service of a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + monitoring_service (str): + Required. The monitoring service the cluster should use + to write metrics. 
Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE + 1.15). + - ``none`` - No metrics will be exported from the + cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will + be used for GKE 1.14+ or ``monitoring.googleapis.com`` + for earlier versions. + + This corresponds to the ``monitoring_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, monitoring_service]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMonitoringServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMonitoringServiceRequest): + request = cluster_service.SetMonitoringServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if monitoring_service is not None: + request.monitoring_service = monitoring_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_addons_config(self, + request: cluster_service.SetAddonsConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + addons_config: cluster_service.AddonsConfig = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the addons for a specific cluster. + + Args: + request (google.container_v1beta1.types.SetAddonsConfigRequest): + The request object. SetAddonsRequest sets the addons + associated with the cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. 
+ + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + addons_config (google.container_v1beta1.types.AddonsConfig): + Required. The desired configurations + for the various addons available to run + in the cluster. + + This corresponds to the ``addons_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, addons_config]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetAddonsConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetAddonsConfigRequest): + request = cluster_service.SetAddonsConfigRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if addons_config is not None: + request.addons_config = addons_config + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_addons_config] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_locations(self, + request: cluster_service.SetLocationsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + locations: Sequence[str] = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the locations for a specific cluster. Deprecated. 
Use + `projects.locations.clusters.update `__ + instead. + + Args: + request (google.container_v1beta1.types.SetLocationsRequest): + The request object. SetLocationsRequest sets the + locations of the cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to upgrade. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + locations (Sequence[str]): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing + the locations a cluster is in will result in nodes being + either created or removed from the cluster, depending on + whether locations are being added or removed. + + This list must always include the cluster's primary + zone. + + This corresponds to the ``locations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + warnings.warn("ClusterManagerClient.set_locations is deprecated", + DeprecationWarning) + + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, locations]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLocationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLocationsRequest): + request = cluster_service.SetLocationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if locations is not None: + request.locations = locations + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_locations] + + # Certain fields should be provided within the metadata header; + # add these here. 
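+ # `to_grpc_metadata` renders these as an ("x-goog-request-params",
+ # "name=...") metadata entry so the backend can route the call to the
+ # right cluster resource.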
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def update_master(self,
+ request: cluster_service.UpdateMasterRequest = None,
+ *,
+ project_id: str = None,
+ zone: str = None,
+ cluster_id: str = None,
+ master_version: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.Operation:
+ r"""Updates the master for a specific cluster.
+
+ Args:
+ request (google.container_v1beta1.types.UpdateMasterRequest):
+ The request object. UpdateMasterRequest updates the
+ master of the cluster.
+ project_id (str):
+ Required. Deprecated. The Google Developers Console
+ `project ID or project
+ number <https://support.google.com/cloud/answer/6158840>`__.
+ This field has been deprecated and replaced by the name
+ field.
+
+ This corresponds to the ``project_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ zone (str):
+ Required. Deprecated. The name of the Google Compute
+ Engine
+ `zone <https://cloud.google.com/compute/docs/zones#available>`__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+
+ This corresponds to the ``zone`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster_id (str):
+ Required. Deprecated. The name of the
+ cluster to upgrade. This field has been
+ deprecated and replaced by the name
+ field.
+
+ This corresponds to the ``cluster_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ master_version (str):
+ Required. The Kubernetes version to
+ change the master to.
+
+ Users may specify either explicit versions offered by
+ Kubernetes Engine or version aliases, which have the
+ following behavior:
+
+ - "latest": picks the highest valid Kubernetes version
+ - "1.X": picks the highest valid patch+gke.N patch in
+ the 1.X version
+ - "1.X.Y": picks the highest valid gke.N patch in the
+ 1.X.Y version
+ - "1.X.Y-gke.N": picks an explicit Kubernetes version
+ - "-": picks the default Kubernetes version
+
+ This corresponds to the ``master_version`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.types.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([project_id, zone, cluster_id, master_version])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.UpdateMasterRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.UpdateMasterRequest): + request = cluster_service.UpdateMasterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if master_version is not None: + request.master_version = master_version + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_master] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_master_auth(self, + request: cluster_service.SetMasterAuthRequest = None, + *, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets master auth materials. Currently supports + changing the admin password or a specific cluster, + either via password generation or explicitly setting the + password. + + Args: + request (google.container_v1beta1.types.SetMasterAuthRequest): + The request object. SetMasterAuthRequest updates the + admin password of a cluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMasterAuthRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetMasterAuthRequest): + request = cluster_service.SetMasterAuthRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_master_auth] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_cluster(self, + request: cluster_service.DeleteClusterRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes the cluster, including the Kubernetes + endpoint and all worker nodes. + + Firewalls and routes that were configured during cluster + creation are also deleted. 
+ + Other Google Compute Engine resources that might be in + use by the cluster, such as load balancer resources, are + not deleted if they weren't present when the cluster was + initially created. + + Args: + request (google.container_v1beta1.types.DeleteClusterRequest): + The request object. DeleteClusterRequest deletes a + cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to delete. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.DeleteClusterRequest): + request = cluster_service.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
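+ # Editorial note, not generated code (hypothetical names, imports
+ # elided): the value returned below is a long-running Operation;
+ # callers typically poll it, e.g.
+ #
+ #   op = client.delete_cluster(project_id="p", zone="us-central1-a",
+ #                              cluster_id="c")
+ #   while client.get_operation(project_id="p", zone="us-central1-a",
+ #                              operation_id=op.name
+ #                              ).status != cluster_service.Operation.Status.DONE:
+ #       time.sleep(2)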
+ return response + + def list_operations(self, + request: cluster_service.ListOperationsRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListOperationsResponse: + r"""Lists all operations in a project in the specified + zone or all zones. + + Args: + request (google.container_v1beta1.types.ListOperationsRequest): + The request object. ListOperationsRequest lists + operations. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + to return operations for, or ``-`` for all zones. This + field has been deprecated and replaced by the parent + field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ListOperationsResponse: + ListOperationsResponse is the result + of ListOperationsRequest. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListOperationsRequest): + request = cluster_service.ListOperationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_operations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_operation(self, + request: cluster_service.GetOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Gets the specified operation. + + Args: + request (google.container_v1beta1.types.GetOperationRequest): + The request object. 
GetOperationRequest gets a single + operation. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (str): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetOperationRequest): + request = cluster_service.GetOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def cancel_operation(self, + request: cluster_service.CancelOperationRequest = None, + *, + project_id: str = None, + zone: str = None, + operation_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Cancels the specified operation. + + Args: + request (google.container_v1beta1.types.CancelOperationRequest): + The request object. CancelOperationRequest cancels a + single operation. + project_id (str): + Required. Deprecated. 
The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation_id (str): + Required. Deprecated. The server-assigned ``name`` of + the operation. This field has been deprecated and + replaced by the name field. + + This corresponds to the ``operation_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, operation_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CancelOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CancelOperationRequest): + request = cluster_service.CancelOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if operation_id is not None: + request.operation_id = operation_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.cancel_operation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def get_server_config(self, + request: cluster_service.GetServerConfigRequest = None, + *, + project_id: str = None, + zone: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ServerConfig: + r"""Returns configuration info about the Google + Kubernetes Engine service. + + Args: + request (google.container_v1beta1.types.GetServerConfigRequest): + The request object. Gets the current Kubernetes Engine + service configuration. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. 
The name of the Google Compute + Engine + `zone `__ + to return operations for. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ServerConfig: + Kubernetes Engine service + configuration. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetServerConfigRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetServerConfigRequest): + request = cluster_service.GetServerConfigRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_server_config] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_node_pools(self, + request: cluster_service.ListNodePoolsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListNodePoolsResponse: + r"""Lists the node pools for a cluster. + + Args: + request (google.container_v1beta1.types.ListNodePoolsRequest): + The request object. ListNodePoolsRequest lists the node + pool(s) for a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.types.ListNodePoolsResponse:
+ ListNodePoolsResponse is the result
+ of ListNodePoolsRequest.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.ListNodePoolsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.ListNodePoolsRequest):
+ request = cluster_service.ListNodePoolsRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if project_id is not None:
+ request.project_id = project_id
+ if zone is not None:
+ request.zone = zone
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_node_pools]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_json_web_keys(self,
+ request: cluster_service.GetJSONWebKeysRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.GetJSONWebKeysResponse:
+ r"""Gets the public component of the cluster signing keys
+ in JSON Web Key format.
+ This API is not yet intended for general use, and is not
+ available for all clusters.
+
+ Args:
+ request (google.container_v1beta1.types.GetJSONWebKeysRequest):
+ The request object. GetJSONWebKeysRequest gets the
+ public component of the keys used by the cluster to sign
+ token requests. This will be the jwks_uri for the
+ discovery document returned by getOpenIDConfig. See the
+ OpenID Connect Discovery 1.0 specification for details.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.types.GetJSONWebKeysResponse:
+ GetJSONWebKeysResponse is a valid
+ JSON Web Key Set as specified in RFC
+ 7517
+
+ """
+ # Create or coerce a protobuf request object.
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.GetJSONWebKeysRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
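+ # Editorial sketch, not generated code: this RPC exposes no flattened
+ # fields, so the request object (or an equivalent dict) is the only
+ # entry point; the resource name below is a placeholder.
+ #
+ #   keys = client.get_json_web_keys(request={
+ #       "parent": "projects/p/locations/us-central1-a/clusters/c",
+ #   })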
+ if not isinstance(request, cluster_service.GetJSONWebKeysRequest): + request = cluster_service.GetJSONWebKeysRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_json_web_keys] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_node_pool(self, + request: cluster_service.GetNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.NodePool: + r"""Retrieves the requested node pool. + + Args: + request (google.container_v1beta1.types.GetNodePoolRequest): + The request object. GetNodePoolRequest retrieves a node + pool for a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (str): + Required. Deprecated. The name of the + node pool. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.NodePool: + NodePool contains the name and + configuration for a cluster's node pool. + Node pools are a set of nodes (i.e. + VM's), with a common configuration and + specification, under the control of the + cluster master. They may have a set of + Kubernetes labels applied to them, which + may be used to reference them during pod + scheduling. They may also be resized up + or down, to accommodate the workload. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
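+ # Editorial sketch, not generated code (hypothetical names): mixing
+ # the two calling styles trips the guard below, e.g.
+ #
+ #   client.get_node_pool(
+ #       request=cluster_service.GetNodePoolRequest(
+ #           name="projects/p/locations/us-central1-a/"
+ #                "clusters/c/nodePools/pool-1"),
+ #       node_pool_id="pool-1",
+ #   )  # raises ValueError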
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.GetNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.GetNodePoolRequest): + request = cluster_service.GetNodePoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_node_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_node_pool(self, + request: cluster_service.CreateNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool: cluster_service.NodePool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Creates a node pool for a cluster. + + Args: + request (google.container_v1beta1.types.CreateNodePoolRequest): + The request object. CreateNodePoolRequest creates a node + pool for a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the + parent field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the parent field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the parent field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool (google.container_v1beta1.types.NodePool): + Required. The node pool to create. + This corresponds to the ``node_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.CreateNodePoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.CreateNodePoolRequest): + request = cluster_service.CreateNodePoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool is not None: + request.node_pool = node_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_node_pool] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_node_pool(self, + request: cluster_service.DeleteNodePoolRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Deletes a node pool from a cluster. + + Args: + request (google.container_v1beta1.types.DeleteNodePoolRequest): + The request object. DeleteNodePoolRequest deletes a node + pool for a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (str): + Required. Deprecated. The name of the + node pool to delete. This field has been + deprecated and replaced by the name + field. 
+
+ This corresponds to the ``node_pool_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.types.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([project_id, zone, cluster_id, node_pool_id])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.DeleteNodePoolRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.DeleteNodePoolRequest):
+ request = cluster_service.DeleteNodePoolRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if project_id is not None:
+ request.project_id = project_id
+ if zone is not None:
+ request.zone = zone
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+ if node_pool_id is not None:
+ request.node_pool_id = node_pool_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_node_pool]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def rollback_node_pool_upgrade(self,
+ request: cluster_service.RollbackNodePoolUpgradeRequest = None,
+ *,
+ project_id: str = None,
+ zone: str = None,
+ cluster_id: str = None,
+ node_pool_id: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.Operation:
+ r"""Rolls back a previously Aborted or Failed NodePool
+ upgrade. This makes no changes if the last upgrade
+ successfully completed.
+
+ Args:
+ request (google.container_v1beta1.types.RollbackNodePoolUpgradeRequest):
+ The request object. RollbackNodePoolUpgradeRequest
+ rolls back the previously Aborted or Failed NodePool
+ upgrade. This will be a no-op if the last upgrade
+ successfully completed.
+ project_id (str):
+ Required. Deprecated. The Google Developers Console
+ `project ID or project
+ number `__.
+ This field has been deprecated and replaced by the name
+ field.
+
+ This corresponds to the ``project_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ zone (str):
+ Required. Deprecated. The name of the Google Compute
+ Engine
+ `zone `__
+ in which the cluster resides. This field has been
+ deprecated and replaced by the name field.
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to rollback. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (str): + Required. Deprecated. The name of the + node pool to rollback. This field has + been deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.RollbackNodePoolUpgradeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest): + request = cluster_service.RollbackNodePoolUpgradeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.rollback_node_pool_upgrade] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_node_pool_management(self, + request: cluster_service.SetNodePoolManagementRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + node_pool_id: str = None, + management: cluster_service.NodeManagement = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the NodeManagement options for a node pool. + + Args: + request (google.container_v1beta1.types.SetNodePoolManagementRequest): + The request object. SetNodePoolManagementRequest sets + the node management properties of a node pool. + project_id (str): + Required. Deprecated. 
The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_pool_id (str): + Required. Deprecated. The name of the + node pool to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``node_pool_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + management (google.container_v1beta1.types.NodeManagement): + Required. NodeManagement + configuration for the node pool. + + This corresponds to the ``management`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, management]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNodePoolManagementRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNodePoolManagementRequest): + request = cluster_service.SetNodePoolManagementRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if node_pool_id is not None: + request.node_pool_id = node_pool_id + if management is not None: + request.management = management + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
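+ # Editorial sketch, not generated code (hypothetical names): a typical
+ # flattened call that reaches the rpc below is
+ #
+ #   client.set_node_pool_management(
+ #       project_id="p", zone="us-central1-a", cluster_id="c",
+ #       node_pool_id="pool-1",
+ #       management=cluster_service.NodeManagement(
+ #           auto_upgrade=True, auto_repair=True),
+ #   )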
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: cluster_service.SetLabelsRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + resource_labels: Sequence[cluster_service.SetLabelsRequest.ResourceLabelsEntry] = None, + label_fingerprint: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets labels on a cluster. + + Args: + request (google.container_v1beta1.types.SetLabelsRequest): + The request object. SetLabelsRequest sets the Google + Cloud Platform labels on a Google Container Engine + cluster, which will in turn set them for Google Compute + Engine resources used by that cluster + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_labels (Sequence[google.container_v1beta1.types.SetLabelsRequest.ResourceLabelsEntry]): + Required. The labels to set for that + cluster. + + This corresponds to the ``resource_labels`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + label_fingerprint (str): + Required. The fingerprint of the previous set of labels + for this resource, used to detect conflicts. The + fingerprint is initially generated by Kubernetes Engine + and changes after every request to modify or update + labels. You must always provide an up-to-date + fingerprint hash when updating or changing labels. Make + a ``get()`` request to the resource to get the latest + fingerprint. + + This corresponds to the ``label_fingerprint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
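+ # Editorial sketch, not generated code (hypothetical names): the label
+ # fingerprint must come from a fresh read, giving a read-modify-write
+ # flow such as
+ #
+ #   name = "projects/p/locations/us-central1-a/clusters/c"
+ #   cluster = client.get_cluster(request={"name": name})
+ #   client.set_labels(request={
+ #       "name": name,
+ #       "resource_labels": {"env": "prod"},
+ #       "label_fingerprint": cluster.label_fingerprint,
+ #   })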
+ has_flattened_params = any([project_id, zone, cluster_id, resource_labels, label_fingerprint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLabelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLabelsRequest): + request = cluster_service.SetLabelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if resource_labels is not None: + request.resource_labels = resource_labels + if label_fingerprint is not None: + request.label_fingerprint = label_fingerprint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_legacy_abac(self, + request: cluster_service.SetLegacyAbacRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + enabled: bool = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables the ABAC authorization mechanism + on a cluster. + + Args: + request (google.container_v1beta1.types.SetLegacyAbacRequest): + The request object. SetLegacyAbacRequest enables or + disables the ABAC authorization mechanism for a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster to update. This field has been + deprecated and replaced by the name + field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + enabled (bool): + Required. Whether ABAC authorization + will be enabled in the cluster. + + This corresponds to the ``enabled`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, enabled]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetLegacyAbacRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetLegacyAbacRequest): + request = cluster_service.SetLegacyAbacRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if enabled is not None: + request.enabled = enabled + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def start_ip_rotation(self, + request: cluster_service.StartIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Starts master IP rotation. + + Args: + request (google.container_v1beta1.types.StartIPRotationRequest): + The request object. StartIPRotationRequest creates a new + IP for the cluster and then performs a node upgrade on + each node pool to point to the new IP. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.StartIPRotationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.StartIPRotationRequest): + request = cluster_service.StartIPRotationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def complete_ip_rotation(self, + request: cluster_service.CompleteIPRotationRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Completes master IP rotation. + + Args: + request (google.container_v1beta1.types.CompleteIPRotationRequest): + The request object. CompleteIPRotationRequest moves the + cluster master back into single-IP mode. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+
+ Returns:
+ google.container_v1beta1.types.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([project_id, zone, cluster_id])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.CompleteIPRotationRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.CompleteIPRotationRequest):
+ request = cluster_service.CompleteIPRotationRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if project_id is not None:
+ request.project_id = project_id
+ if zone is not None:
+ request.zone = zone
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def set_node_pool_size(self,
+ request: cluster_service.SetNodePoolSizeRequest = None,
+ *,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> cluster_service.Operation:
+ r"""Sets the size for a specific node pool.
+
+ Args:
+ request (google.container_v1beta1.types.SetNodePoolSizeRequest):
+ The request object. SetNodePoolSizeRequest sets the size
+ of a node pool.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.types.Operation:
+ This operation resource represents
+ operations that may have happened or are
+ happening on the cluster. All fields are
+ output only.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.SetNodePoolSizeRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.SetNodePoolSizeRequest):
+ request = cluster_service.SetNodePoolSizeRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
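+ # Editorial sketch, not generated code (hypothetical names): with no
+ # flattened fields, callers pass the request directly to the rpc
+ # below, e.g.
+ #
+ #   client.set_node_pool_size(request={
+ #       "name": "projects/p/locations/us-central1-a/"
+ #               "clusters/c/nodePools/pool-1",
+ #       "node_count": 3,
+ #   })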
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_network_policy(self, + request: cluster_service.SetNetworkPolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + network_policy: cluster_service.NetworkPolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Enables or disables Network Policy for a cluster. + + Args: + request (google.container_v1beta1.types.SetNetworkPolicyRequest): + The request object. SetNetworkPolicyRequest + enables/disables network policy for a cluster. + project_id (str): + Required. Deprecated. The Google Developers Console + `project ID or project + number `__. + This field has been deprecated and replaced by the name + field. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. Deprecated. The name of the Google Compute + Engine + `zone `__ + in which the cluster resides. This field has been + deprecated and replaced by the name field. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated + and replaced by the name field. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_policy (google.container_v1beta1.types.NetworkPolicy): + Required. Configuration options for + the NetworkPolicy feature. + + This corresponds to the ``network_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, network_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetNetworkPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.SetNetworkPolicyRequest): + request = cluster_service.SetNetworkPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project_id is not None: + request.project_id = project_id + if zone is not None: + request.zone = zone + if cluster_id is not None: + request.cluster_id = cluster_id + if network_policy is not None: + request.network_policy = network_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_network_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_maintenance_policy(self, + request: cluster_service.SetMaintenancePolicyRequest = None, + *, + project_id: str = None, + zone: str = None, + cluster_id: str = None, + maintenance_policy: cluster_service.MaintenancePolicy = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.Operation: + r"""Sets the maintenance policy for a cluster. + + Args: + request (google.container_v1beta1.types.SetMaintenancePolicyRequest): + The request object. SetMaintenancePolicyRequest sets the + maintenance policy for a cluster. + project_id (str): + Required. The Google Developers Console `project ID or + project + number `__. + + This corresponds to the ``project_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. The name of the cluster to + update. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + maintenance_policy (google.container_v1beta1.types.MaintenancePolicy): + Required. The maintenance policy to + be set for the cluster. An empty field + clears the existing maintenance policy. + + This corresponds to the ``maintenance_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.Operation: + This operation resource represents + operations that may have happened or are + happening on the cluster. All fields are + output only. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.SetMaintenancePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, cluster_service.SetMaintenancePolicyRequest):
+ request = cluster_service.SetMaintenancePolicyRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if project_id is not None:
+ request.project_id = project_id
+ if zone is not None:
+ request.zone = zone
+ if cluster_id is not None:
+ request.cluster_id = cluster_id
+ if maintenance_policy is not None:
+ request.maintenance_policy = maintenance_policy
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def list_usable_subnetworks(self,
+ request: cluster_service.ListUsableSubnetworksRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListUsableSubnetworksPager:
+ r"""Lists subnetworks that can be used for creating
+ clusters in a project.
+
+ Args:
+ request (google.container_v1beta1.types.ListUsableSubnetworksRequest):
+ The request object. ListUsableSubnetworksRequest
+ requests the list of usable subnetworks available to a
+ user for creating clusters.
+ parent (str):
+ Required. The parent project where subnetworks are
+ usable. Specified in the format ``projects/*``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.container_v1beta1.services.cluster_manager.pagers.ListUsableSubnetworksPager:
+ ListUsableSubnetworksResponse is the
+ response of
+ ListUsableSubnetworksRequest.
+
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a cluster_service.ListUsableSubnetworksRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, cluster_service.ListUsableSubnetworksRequest):
+ request = cluster_service.ListUsableSubnetworksRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
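+ # (The wrapped callables were precomputed in
+ # ``_prep_wrapped_messages``, so this lookup is a plain dictionary
+ # access keyed by the bound transport method.)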
+ rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListUsableSubnetworksPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_locations(self, + request: cluster_service.ListLocationsRequest = None, + *, + parent: str = None, + retry: retries.Retry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> cluster_service.ListLocationsResponse: + r"""Fetches locations that offer Google Kubernetes + Engine. + + Args: + request (google.container_v1beta1.types.ListLocationsRequest): + The request object. ListLocationsRequest is used to + request the locations that offer GKE. + parent (str): + Required. Contains the name of the resource requested. + Specified in the format ``projects/*``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.container_v1beta1.types.ListLocationsResponse: + ListLocationsResponse returns the + list of all GKE locations and their + recommendation state. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a cluster_service.ListLocationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, cluster_service.ListLocationsRequest): + request = cluster_service.ListLocationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_locations] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-container", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ClusterManagerClient", +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py new file mode 100644 index 00000000..59e94e70 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional + +from google.container_v1beta1.types import cluster_service + + +class ListUsableSubnetworksPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` object, and + provides an ``__iter__`` method to iterate through its + ``subnetworks`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., cluster_service.ListUsableSubnetworksResponse], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.container_v1beta1.types.ListUsableSubnetworksRequest): + The initial request object. + response (google.container_v1beta1.types.ListUsableSubnetworksResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
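+
+ A usage sketch (illustrative only; ``client`` is assumed to be an
+ initialized ClusterManagerClient and ``my-project`` is a
+ placeholder). The pager is normally obtained from
+ ``ClusterManagerClient.list_usable_subnetworks`` rather than
+ constructed directly:
+
+ >>> pager = client.list_usable_subnetworks(
+ ... request={"parent": "projects/my-project"})
+ >>> for subnetwork in pager:
+ ... print(subnetwork.subnetwork)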
+ """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterable[cluster_service.UsableSubnetwork]: + for page in self.pages: + yield from page.subnetworks + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListUsableSubnetworksAsyncPager: + """A pager for iterating through ``list_usable_subnetworks`` requests. + + This class thinly wraps an initial + :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``subnetworks`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListUsableSubnetworks`` requests and continue to iterate + through the ``subnetworks`` field on the + corresponding responses. + + All the usual :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[cluster_service.ListUsableSubnetworksResponse]], + request: cluster_service.ListUsableSubnetworksRequest, + response: cluster_service.ListUsableSubnetworksResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.container_v1beta1.types.ListUsableSubnetworksRequest): + The initial request object. + response (google.container_v1beta1.types.ListUsableSubnetworksResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = cluster_service.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterable[cluster_service.ListUsableSubnetworksResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + + def __aiter__(self) -> AsyncIterable[cluster_service.UsableSubnetwork]: + async def async_generator(): + async for page in self.pages: + for response in page.subnetworks: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py new file mode 100644 index 00000000..32ea8716 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ClusterManagerTransport +from .grpc import ClusterManagerGrpcTransport +from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] +_transport_registry['grpc'] = ClusterManagerGrpcTransport +_transport_registry['grpc_asyncio'] = ClusterManagerGrpcAsyncIOTransport + +__all__ = ( + 'ClusterManagerTransport', + 'ClusterManagerGrpcTransport', + 'ClusterManagerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py new file mode 100644 index 00000000..bb24b465 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py @@ -0,0 +1,694 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import packaging.version +import pkg_resources + +import google.auth # type: ignore +import google.api_core # type: ignore +from google.api_core import exceptions as core_exceptions # type: ignore +from google.api_core import gapic_v1 # type: ignore +from google.api_core import retry as retries # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.container_v1beta1.types import cluster_service +from google.protobuf import empty_pb2 # type: ignore + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-container', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + +try: + # google.auth.__version__ was added in 1.26.0 + _GOOGLE_AUTH_VERSION = google.auth.__version__ +except AttributeError: + try: # try pkg_resources if it is available + _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version + except pkg_resources.DistributionNotFound: # pragma: NO COVER + _GOOGLE_AUTH_VERSION = None + + +class ClusterManagerTransport(abc.ABC): + """Abstract transport class for ClusterManager.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'container.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. 
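+ # (Resolution order below: passing both ``credentials`` and
+ # ``credentials_file`` raises DuplicateCredentialArgs; otherwise an
+ # explicit credentials file wins, then explicit credentials, then
+ # Application Default Credentials via ``google.auth.default()``.)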
+ if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials is service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # TODO(busunkim): This method is in the base transport + # to avoid duplicating code across the transport classes. These functions + # should be deleted once the minimum required versions of google-auth is increased. + + # TODO: Remove this function once google-auth >= 1.25.0 is required + @classmethod + def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: + """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" + + scopes_kwargs = {} + + if _GOOGLE_AUTH_VERSION and ( + packaging.version.parse(_GOOGLE_AUTH_VERSION) + >= packaging.version.parse("1.25.0") + ): + scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} + else: + scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} + + return scopes_kwargs + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_timeout=45.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_timeout=45.0, + client_info=client_info, + ), + self.update_node_pool: gapic_v1.method.wrap_method( + self.update_node_pool, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_autoscaling: gapic_v1.method.wrap_method( + self.set_node_pool_autoscaling, + default_timeout=45.0, + client_info=client_info, + ), + self.set_logging_service: gapic_v1.method.wrap_method( + self.set_logging_service, + default_timeout=45.0, + client_info=client_info, + ), + self.set_monitoring_service: gapic_v1.method.wrap_method( + self.set_monitoring_service, + default_timeout=45.0, + client_info=client_info, + ), + self.set_addons_config: gapic_v1.method.wrap_method( + self.set_addons_config, + default_timeout=45.0, + client_info=client_info, + ), + self.set_locations: gapic_v1.method.wrap_method( + self.set_locations, + default_timeout=45.0, + client_info=client_info, + ), + 
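+ # (With few exceptions, the entries in this mapping follow one
+ # pattern: read-style RPCs are wrapped with an exponential retry on
+ # DeadlineExceeded / ServiceUnavailable and a 20-second deadline,
+ # while mutations get a flat 45-second timeout and no automatic
+ # retry.)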
self.update_master: gapic_v1.method.wrap_method( + self.update_master, + default_timeout=45.0, + client_info=client_info, + ), + self.set_master_auth: gapic_v1.method.wrap_method( + self.set_master_auth, + default_timeout=45.0, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_operations: gapic_v1.method.wrap_method( + self.list_operations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_operation: gapic_v1.method.wrap_method( + self.get_operation, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.cancel_operation: gapic_v1.method.wrap_method( + self.cancel_operation, + default_timeout=45.0, + client_info=client_info, + ), + self.get_server_config: gapic_v1.method.wrap_method( + self.get_server_config, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_node_pools: gapic_v1.method.wrap_method( + self.list_node_pools, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.get_json_web_keys: gapic_v1.method.wrap_method( + self.get_json_web_keys, + default_timeout=None, + client_info=client_info, + ), + self.get_node_pool: gapic_v1.method.wrap_method( + self.get_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.create_node_pool: gapic_v1.method.wrap_method( + self.create_node_pool, + default_timeout=45.0, + client_info=client_info, + ), + self.delete_node_pool: gapic_v1.method.wrap_method( + self.delete_node_pool, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method( + self.rollback_node_pool_upgrade, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_management: gapic_v1.method.wrap_method( + self.set_node_pool_management, + default_timeout=45.0, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=45.0, + client_info=client_info, + ), + self.set_legacy_abac: gapic_v1.method.wrap_method( + 
self.set_legacy_abac, + default_timeout=45.0, + client_info=client_info, + ), + self.start_ip_rotation: gapic_v1.method.wrap_method( + self.start_ip_rotation, + default_timeout=45.0, + client_info=client_info, + ), + self.complete_ip_rotation: gapic_v1.method.wrap_method( + self.complete_ip_rotation, + default_timeout=45.0, + client_info=client_info, + ), + self.set_node_pool_size: gapic_v1.method.wrap_method( + self.set_node_pool_size, + default_timeout=45.0, + client_info=client_info, + ), + self.set_network_policy: gapic_v1.method.wrap_method( + self.set_network_policy, + default_timeout=45.0, + client_info=client_info, + ), + self.set_maintenance_policy: gapic_v1.method.wrap_method( + self.set_maintenance_policy, + default_timeout=45.0, + client_info=client_info, + ), + self.list_usable_subnetworks: gapic_v1.method.wrap_method( + self.list_usable_subnetworks, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + self.list_locations: gapic_v1.method.wrap_method( + self.list_locations, + default_retry=retries.Retry( +initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=20.0, + ), + default_timeout=20.0, + client_info=client_info, + ), + } + + @property + def list_clusters(self) -> Callable[ + [cluster_service.ListClustersRequest], + Union[ + cluster_service.ListClustersResponse, + Awaitable[cluster_service.ListClustersResponse] + ]]: + raise NotImplementedError() + + @property + def get_cluster(self) -> Callable[ + [cluster_service.GetClusterRequest], + Union[ + cluster_service.Cluster, + Awaitable[cluster_service.Cluster] + ]]: + raise NotImplementedError() + + @property + def create_cluster(self) -> Callable[ + [cluster_service.CreateClusterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def update_cluster(self) -> Callable[ + [cluster_service.UpdateClusterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def update_node_pool(self) -> Callable[ + [cluster_service.UpdateNodePoolRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_node_pool_autoscaling(self) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_logging_service(self) -> Callable[ + [cluster_service.SetLoggingServiceRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_monitoring_service(self) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_addons_config(self) -> Callable[ + [cluster_service.SetAddonsConfigRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_locations(self) -> Callable[ + [cluster_service.SetLocationsRequest], + Union[ + 
cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def update_master(self) -> Callable[ + [cluster_service.UpdateMasterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_master_auth(self) -> Callable[ + [cluster_service.SetMasterAuthRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_cluster(self) -> Callable[ + [cluster_service.DeleteClusterRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def list_operations(self) -> Callable[ + [cluster_service.ListOperationsRequest], + Union[ + cluster_service.ListOperationsResponse, + Awaitable[cluster_service.ListOperationsResponse] + ]]: + raise NotImplementedError() + + @property + def get_operation(self) -> Callable[ + [cluster_service.GetOperationRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def cancel_operation(self) -> Callable[ + [cluster_service.CancelOperationRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def get_server_config(self) -> Callable[ + [cluster_service.GetServerConfigRequest], + Union[ + cluster_service.ServerConfig, + Awaitable[cluster_service.ServerConfig] + ]]: + raise NotImplementedError() + + @property + def list_node_pools(self) -> Callable[ + [cluster_service.ListNodePoolsRequest], + Union[ + cluster_service.ListNodePoolsResponse, + Awaitable[cluster_service.ListNodePoolsResponse] + ]]: + raise NotImplementedError() + + @property + def get_json_web_keys(self) -> Callable[ + [cluster_service.GetJSONWebKeysRequest], + Union[ + cluster_service.GetJSONWebKeysResponse, + Awaitable[cluster_service.GetJSONWebKeysResponse] + ]]: + raise NotImplementedError() + + @property + def get_node_pool(self) -> Callable[ + [cluster_service.GetNodePoolRequest], + Union[ + cluster_service.NodePool, + Awaitable[cluster_service.NodePool] + ]]: + raise NotImplementedError() + + @property + def create_node_pool(self) -> Callable[ + [cluster_service.CreateNodePoolRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_node_pool(self) -> Callable[ + [cluster_service.DeleteNodePoolRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def rollback_node_pool_upgrade(self) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_node_pool_management(self) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [cluster_service.SetLabelsRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_legacy_abac(self) -> Callable[ + [cluster_service.SetLegacyAbacRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() 
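+
+ # (Each property below mirrors one RPC: this abstract base only
+ # declares the callable's signature, and the grpc / grpc_asyncio
+ # transports later in this diff supply the concrete stubs.)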
+ + @property + def start_ip_rotation(self) -> Callable[ + [cluster_service.StartIPRotationRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def complete_ip_rotation(self) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_node_pool_size(self) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_network_policy(self) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def set_maintenance_policy(self) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + Union[ + cluster_service.Operation, + Awaitable[cluster_service.Operation] + ]]: + raise NotImplementedError() + + @property + def list_usable_subnetworks(self) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + Union[ + cluster_service.ListUsableSubnetworksResponse, + Awaitable[cluster_service.ListUsableSubnetworksResponse] + ]]: + raise NotImplementedError() + + @property + def list_locations(self) -> Callable[ + [cluster_service.ListLocationsRequest], + Union[ + cluster_service.ListLocationsResponse, + Awaitable[cluster_service.ListLocationsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ClusterManagerTransport', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py new file mode 100644 index 00000000..59447074 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py @@ -0,0 +1,1124 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers # type: ignore +from google.api_core import gapic_v1 # type: ignore +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.container_v1beta1.types import cluster_service +from google.protobuf import empty_pb2 # type: ignore +from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO + + +class ClusterManagerGrpcTransport(ClusterManagerTransport): + """gRPC backend transport for ClusterManager. + + Google Kubernetes Engine Cluster Manager v1beta1 + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+ _stubs: Dict[str, Callable]
+
+ def __init__(self, *,
+ host: str = 'container.googleapis.com',
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+ scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
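+
+ A construction sketch (illustrative, not generated
+ documentation; assumes Application Default Credentials are
+ available in the environment):
+
+ >>> from google.container_v1beta1 import ClusterManagerClient
+ >>> from google.container_v1beta1.services.cluster_manager.transports import ClusterManagerGrpcTransport
+ >>> transport = ClusterManagerGrpcTransport(host="container.googleapis.com")
+ >>> client = ClusterManagerClient(transport=transport)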
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'container.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: str = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def list_clusters(self) -> Callable[ + [cluster_service.ListClustersRequest], + cluster_service.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListClusters', + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def get_cluster(self) -> Callable[ + [cluster_service.GetClusterRequest], + cluster_service.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details for a specific cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetCluster', + request_serializer=cluster_service.GetClusterRequest.serialize, + response_deserializer=cluster_service.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def create_cluster(self) -> Callable[ + [cluster_service.CreateClusterRequest], + cluster_service.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster, consisting of the specified number and type + of Google Compute Engine instances. + + By default, the cluster is created in the project's `default + network `__. + + One firewall is added for the cluster. After cluster creation, + the Kubelet creates routes for each node to allow the containers + on that node to communicate with all other instances in the + cluster. + + Finally, an entry is added to the project's global metadata + indicating which CIDR range the cluster is using. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CreateCluster', + request_serializer=cluster_service.CreateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_cluster'] + + @property + def update_cluster(self) -> Callable[ + [cluster_service.UpdateClusterRequest], + cluster_service.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates the settings for a specific cluster. + + Returns: + Callable[[~.UpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/UpdateCluster', + request_serializer=cluster_service.UpdateClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_cluster'] + + @property + def update_node_pool(self) -> Callable[ + [cluster_service.UpdateNodePoolRequest], + cluster_service.Operation]: + r"""Return a callable for the update node pool method over gRPC. + + Updates the version and/or image type of a specific + node pool. + + Returns: + Callable[[~.UpdateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_node_pool' not in self._stubs: + self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/UpdateNodePool', + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_node_pool'] + + @property + def set_node_pool_autoscaling(self) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + cluster_service.Operation]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings of a specific node + pool. + + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_autoscaling' not in self._stubs: + self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling', + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_autoscaling'] + + @property + def set_logging_service(self) -> Callable[ + [cluster_service.SetLoggingServiceRequest], + cluster_service.Operation]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. 
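+
+ For example (an illustrative sketch; ``transport`` is an
+ initialized ClusterManagerGrpcTransport and the cluster name is
+ a placeholder), the returned callable takes the request message
+ directly:
+
+ >>> request = cluster_service.SetLoggingServiceRequest(
+ ... name="projects/my-project/locations/us-central1/clusters/my-cluster",
+ ... logging_service="logging.googleapis.com/kubernetes",
+ ... )
+ >>> operation = transport.set_logging_service(request)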
+ + Returns: + Callable[[~.SetLoggingServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_logging_service' not in self._stubs: + self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetLoggingService', + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_logging_service'] + + @property + def set_monitoring_service(self) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + cluster_service.Operation]: + r"""Return a callable for the set monitoring service method over gRPC. + + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_monitoring_service' not in self._stubs: + self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetMonitoringService', + request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_monitoring_service'] + + @property + def set_addons_config(self) -> Callable[ + [cluster_service.SetAddonsConfigRequest], + cluster_service.Operation]: + r"""Return a callable for the set addons config method over gRPC. + + Sets the addons for a specific cluster. + + Returns: + Callable[[~.SetAddonsConfigRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_addons_config' not in self._stubs: + self._stubs['set_addons_config'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetAddonsConfig', + request_serializer=cluster_service.SetAddonsConfigRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_addons_config'] + + @property + def set_locations(self) -> Callable[ + [cluster_service.SetLocationsRequest], + cluster_service.Operation]: + r"""Return a callable for the set locations method over gRPC. + + Sets the locations for a specific cluster. Deprecated. Use + `projects.locations.clusters.update `__ + instead. + + Returns: + Callable[[~.SetLocationsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'set_locations' not in self._stubs:
+            self._stubs['set_locations'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/SetLocations',
+                request_serializer=cluster_service.SetLocationsRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_locations']
+
+    @property
+    def update_master(self) -> Callable[
+            [cluster_service.UpdateMasterRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the update master method over gRPC.
+
+        Updates the master for a specific cluster.
+
+        Returns:
+            Callable[[~.UpdateMasterRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_master' not in self._stubs:
+            self._stubs['update_master'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/UpdateMaster',
+                request_serializer=cluster_service.UpdateMasterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['update_master']
+
+    @property
+    def set_master_auth(self) -> Callable[
+            [cluster_service.SetMasterAuthRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the set master auth method over gRPC.
+
+        Sets master auth materials. Currently supports
+        changing the admin password of a specific cluster,
+        either via password generation or explicitly setting the
+        password.
+
+        Returns:
+            Callable[[~.SetMasterAuthRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_master_auth' not in self._stubs:
+            self._stubs['set_master_auth'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/SetMasterAuth',
+                request_serializer=cluster_service.SetMasterAuthRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_master_auth']
+
+    @property
+    def delete_cluster(self) -> Callable[
+            [cluster_service.DeleteClusterRequest],
+            cluster_service.Operation]:
+        r"""Return a callable for the delete cluster method over gRPC.
+
+        Deletes the cluster, including the Kubernetes
+        endpoint and all worker nodes.
+
+        Firewalls and routes that were configured during cluster
+        creation are also deleted.
+
+        Other Google Compute Engine resources that might be in
+        use by the cluster, such as load balancer resources, are
+        not deleted if they weren't present when the cluster was
+        initially created.
+
+        Returns:
+            Callable[[~.DeleteClusterRequest],
+                    ~.Operation]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+ if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/DeleteCluster', + request_serializer=cluster_service.DeleteClusterRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['delete_cluster'] + + @property + def list_operations(self) -> Callable[ + [cluster_service.ListOperationsRequest], + cluster_service.ListOperationsResponse]: + r"""Return a callable for the list operations method over gRPC. + + Lists all operations in a project in the specified + zone or all zones. + + Returns: + Callable[[~.ListOperationsRequest], + ~.ListOperationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_operations' not in self._stubs: + self._stubs['list_operations'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListOperations', + request_serializer=cluster_service.ListOperationsRequest.serialize, + response_deserializer=cluster_service.ListOperationsResponse.deserialize, + ) + return self._stubs['list_operations'] + + @property + def get_operation(self) -> Callable[ + [cluster_service.GetOperationRequest], + cluster_service.Operation]: + r"""Return a callable for the get operation method over gRPC. + + Gets the specified operation. + + Returns: + Callable[[~.GetOperationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_operation' not in self._stubs: + self._stubs['get_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetOperation', + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['get_operation'] + + @property + def cancel_operation(self) -> Callable[ + [cluster_service.CancelOperationRequest], + empty_pb2.Empty]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_operation' not in self._stubs: + self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CancelOperation', + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_operation'] + + @property + def get_server_config(self) -> Callable[ + [cluster_service.GetServerConfigRequest], + cluster_service.ServerConfig]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. 
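+
+        For example (illustrative sketch only; assumes a constructed
+        synchronous transport bound as ``transport``; the project and
+        location below are hypothetical)::
+
+            request = cluster_service.GetServerConfigRequest(
+                name='projects/my-project/locations/us-central1-a',
+            )
+            config = transport.get_server_config(request)
+            print(config.valid_master_versions)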
+ + Returns: + Callable[[~.GetServerConfigRequest], + ~.ServerConfig]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_server_config' not in self._stubs: + self._stubs['get_server_config'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetServerConfig', + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs['get_server_config'] + + @property + def list_node_pools(self) -> Callable[ + [cluster_service.ListNodePoolsRequest], + cluster_service.ListNodePoolsResponse]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + ~.ListNodePoolsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_node_pools' not in self._stubs: + self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListNodePools', + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs['list_node_pools'] + + @property + def get_json_web_keys(self) -> Callable[ + [cluster_service.GetJSONWebKeysRequest], + cluster_service.GetJSONWebKeysResponse]: + r"""Return a callable for the get json web keys method over gRPC. + + Gets the public component of the cluster signing keys + in JSON Web Key format. + This API is not yet intended for general use, and is not + available for all clusters. + + Returns: + Callable[[~.GetJSONWebKeysRequest], + ~.GetJSONWebKeysResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_json_web_keys' not in self._stubs: + self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetJSONWebKeys', + request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, + response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, + ) + return self._stubs['get_json_web_keys'] + + @property + def get_node_pool(self) -> Callable[ + [cluster_service.GetNodePoolRequest], + cluster_service.NodePool]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + ~.NodePool]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
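+        # Note (illustrative, on the caching below): because the stub is
+        # stored in ``self._stubs`` on first use, repeated attribute
+        # access on a transport instance returns the same object:
+        #
+        #     assert transport.get_node_pool is transport.get_node_pool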
+ if 'get_node_pool' not in self._stubs: + self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetNodePool', + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs['get_node_pool'] + + @property + def create_node_pool(self) -> Callable[ + [cluster_service.CreateNodePoolRequest], + cluster_service.Operation]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_node_pool' not in self._stubs: + self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CreateNodePool', + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_node_pool'] + + @property + def delete_node_pool(self) -> Callable[ + [cluster_service.DeleteNodePoolRequest], + cluster_service.Operation]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_node_pool' not in self._stubs: + self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/DeleteNodePool', + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['delete_node_pool'] + + @property + def rollback_node_pool_upgrade(self) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + cluster_service.Operation]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'rollback_node_pool_upgrade' not in self._stubs: + self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade', + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['rollback_node_pool_upgrade'] + + @property + def set_node_pool_management(self) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + cluster_service.Operation]: + r"""Return a callable for the set node pool management method over gRPC. 
+ + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_management' not in self._stubs: + self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNodePoolManagement', + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_management'] + + @property + def set_labels(self) -> Callable[ + [cluster_service.SetLabelsRequest], + cluster_service.Operation]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_labels' not in self._stubs: + self._stubs['set_labels'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetLabels', + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_labels'] + + @property + def set_legacy_abac(self) -> Callable[ + [cluster_service.SetLegacyAbacRequest], + cluster_service.Operation]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_legacy_abac' not in self._stubs: + self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetLegacyAbac', + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_legacy_abac'] + + @property + def start_ip_rotation(self) -> Callable[ + [cluster_service.StartIPRotationRequest], + cluster_service.Operation]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'start_ip_rotation' not in self._stubs: + self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/StartIPRotation', + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['start_ip_rotation'] + + @property + def complete_ip_rotation(self) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + cluster_service.Operation]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. + + Returns: + Callable[[~.CompleteIPRotationRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_ip_rotation' not in self._stubs: + self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CompleteIPRotation', + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['complete_ip_rotation'] + + @property + def set_node_pool_size(self) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], + cluster_service.Operation]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_size' not in self._stubs: + self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNodePoolSize', + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_size'] + + @property + def set_network_policy(self) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], + cluster_service.Operation]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_network_policy' not in self._stubs: + self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNetworkPolicy', + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_network_policy'] + + @property + def set_maintenance_policy(self) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + cluster_service.Operation]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. 
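+
+        For example (illustrative sketch only; assumes a constructed
+        synchronous transport bound as ``transport``; the cluster name
+        is hypothetical and a real call would populate the policy with
+        the desired maintenance windows)::
+
+            request = cluster_service.SetMaintenancePolicyRequest(
+                name='projects/my-project/locations/us-central1-a/clusters/my-cluster',
+                maintenance_policy=cluster_service.MaintenancePolicy(),  # fill in windows
+            )
+            operation = transport.set_maintenance_policy(request)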
+ + Returns: + Callable[[~.SetMaintenancePolicyRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_maintenance_policy' not in self._stubs: + self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetMaintenancePolicy', + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_maintenance_policy'] + + @property + def list_usable_subnetworks(self) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + cluster_service.ListUsableSubnetworksResponse]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that can be used for creating + clusters in a project. + + Returns: + Callable[[~.ListUsableSubnetworksRequest], + ~.ListUsableSubnetworksResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_usable_subnetworks' not in self._stubs: + self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListUsableSubnetworks', + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs['list_usable_subnetworks'] + + @property + def list_locations(self) -> Callable[ + [cluster_service.ListLocationsRequest], + cluster_service.ListLocationsResponse]: + r"""Return a callable for the list locations method over gRPC. + + Fetches locations that offer Google Kubernetes + Engine. + + Returns: + Callable[[~.ListLocationsRequest], + ~.ListLocationsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_locations' not in self._stubs: + self._stubs['list_locations'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListLocations', + request_serializer=cluster_service.ListLocationsRequest.serialize, + response_deserializer=cluster_service.ListLocationsResponse.deserialize, + ) + return self._stubs['list_locations'] + + +__all__ = ( + 'ClusterManagerGrpcTransport', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py new file mode 100644 index 00000000..f2dd2dda --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py @@ -0,0 +1,1128 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1  # type: ignore
+from google.api_core import grpc_helpers_async  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+import packaging.version
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.container_v1beta1.types import cluster_service
+from google.protobuf import empty_pb2  # type: ignore
+from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO
+from .grpc import ClusterManagerGrpcTransport
+
+
+class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport):
+    """gRPC AsyncIO backend transport for ClusterManager.
+
+    Google Kubernetes Engine Cluster Manager v1beta1
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'container.googleapis.com',
+                       credentials: ga_credentials.Credentials = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
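+
+        For example (illustrative sketch only; assumes application
+        default credentials are available in the environment)::
+
+            # The default host and scopes are supplied by the class.
+            channel = ClusterManagerGrpcAsyncIOTransport.create_channel()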
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'container.googleapis.com', + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: aio.Channel = None, + api_mtls_endpoint: str = None, + client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, + ssl_channel_credentials: grpc.ChannelCredentials = None, + client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + quota_project_id=None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or applicatin default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + credentials=self._credentials, + credentials_file=credentials_file, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def list_clusters(self) -> Callable[ + [cluster_service.ListClustersRequest], + Awaitable[cluster_service.ListClustersResponse]]: + r"""Return a callable for the list clusters method over gRPC. + + Lists all clusters owned by a project in either the + specified zone or all zones. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListClusters', + request_serializer=cluster_service.ListClustersRequest.serialize, + response_deserializer=cluster_service.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def get_cluster(self) -> Callable[ + [cluster_service.GetClusterRequest], + Awaitable[cluster_service.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets the details for a specific cluster. 
+
+        Returns:
+            Callable[[~.GetClusterRequest],
+                    Awaitable[~.Cluster]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_cluster' not in self._stubs:
+            self._stubs['get_cluster'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/GetCluster',
+                request_serializer=cluster_service.GetClusterRequest.serialize,
+                response_deserializer=cluster_service.Cluster.deserialize,
+            )
+        return self._stubs['get_cluster']
+
+    @property
+    def create_cluster(self) -> Callable[
+            [cluster_service.CreateClusterRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the create cluster method over gRPC.
+
+        Creates a cluster, consisting of the specified number and type
+        of Google Compute Engine instances.
+
+        By default, the cluster is created in the project's `default
+        network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__.
+
+        One firewall is added for the cluster. After cluster creation,
+        the Kubelet creates routes for each node to allow the containers
+        on that node to communicate with all other instances in the
+        cluster.
+
+        Finally, an entry is added to the project's global metadata
+        indicating which CIDR range the cluster is using.
+
+        Returns:
+            Callable[[~.CreateClusterRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_cluster' not in self._stubs:
+            self._stubs['create_cluster'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/CreateCluster',
+                request_serializer=cluster_service.CreateClusterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['create_cluster']
+
+    @property
+    def update_cluster(self) -> Callable[
+            [cluster_service.UpdateClusterRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the update cluster method over gRPC.
+
+        Updates the settings for a specific cluster.
+
+        Returns:
+            Callable[[~.UpdateClusterRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_cluster' not in self._stubs:
+            self._stubs['update_cluster'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/UpdateCluster',
+                request_serializer=cluster_service.UpdateClusterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['update_cluster']
+
+    @property
+    def update_node_pool(self) -> Callable[
+            [cluster_service.UpdateNodePoolRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the update node pool method over gRPC.
+
+        Updates the version and/or image type of a specific
+        node pool.
+
+        Returns:
+            Callable[[~.UpdateNodePoolRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_node_pool' not in self._stubs: + self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/UpdateNodePool', + request_serializer=cluster_service.UpdateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['update_node_pool'] + + @property + def set_node_pool_autoscaling(self) -> Callable[ + [cluster_service.SetNodePoolAutoscalingRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set node pool autoscaling method over gRPC. + + Sets the autoscaling settings of a specific node + pool. + + Returns: + Callable[[~.SetNodePoolAutoscalingRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_autoscaling' not in self._stubs: + self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling', + request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_autoscaling'] + + @property + def set_logging_service(self) -> Callable[ + [cluster_service.SetLoggingServiceRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set logging service method over gRPC. + + Sets the logging service for a specific cluster. + + Returns: + Callable[[~.SetLoggingServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_logging_service' not in self._stubs: + self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetLoggingService', + request_serializer=cluster_service.SetLoggingServiceRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_logging_service'] + + @property + def set_monitoring_service(self) -> Callable[ + [cluster_service.SetMonitoringServiceRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set monitoring service method over gRPC. + + Sets the monitoring service for a specific cluster. + + Returns: + Callable[[~.SetMonitoringServiceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+        if 'set_monitoring_service' not in self._stubs:
+            self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/SetMonitoringService',
+                request_serializer=cluster_service.SetMonitoringServiceRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_monitoring_service']
+
+    @property
+    def set_addons_config(self) -> Callable[
+            [cluster_service.SetAddonsConfigRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the set addons config method over gRPC.
+
+        Sets the addons for a specific cluster.
+
+        Returns:
+            Callable[[~.SetAddonsConfigRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_addons_config' not in self._stubs:
+            self._stubs['set_addons_config'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/SetAddonsConfig',
+                request_serializer=cluster_service.SetAddonsConfigRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_addons_config']
+
+    @property
+    def set_locations(self) -> Callable[
+            [cluster_service.SetLocationsRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the set locations method over gRPC.
+
+        Sets the locations for a specific cluster. Deprecated. Use
+        `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters/update>`__
+        instead.
+
+        Returns:
+            Callable[[~.SetLocationsRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_locations' not in self._stubs:
+            self._stubs['set_locations'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/SetLocations',
+                request_serializer=cluster_service.SetLocationsRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_locations']
+
+    @property
+    def update_master(self) -> Callable[
+            [cluster_service.UpdateMasterRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the update master method over gRPC.
+
+        Updates the master for a specific cluster.
+
+        Returns:
+            Callable[[~.UpdateMasterRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'update_master' not in self._stubs:
+            self._stubs['update_master'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/UpdateMaster',
+                request_serializer=cluster_service.UpdateMasterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['update_master']
+
+    @property
+    def set_master_auth(self) -> Callable[
+            [cluster_service.SetMasterAuthRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the set master auth method over gRPC.
+
+        Sets master auth materials. Currently supports
+        changing the admin password of a specific cluster,
+        either via password generation or explicitly setting the
+        password.
+
+        Returns:
+            Callable[[~.SetMasterAuthRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'set_master_auth' not in self._stubs:
+            self._stubs['set_master_auth'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/SetMasterAuth',
+                request_serializer=cluster_service.SetMasterAuthRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['set_master_auth']
+
+    @property
+    def delete_cluster(self) -> Callable[
+            [cluster_service.DeleteClusterRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the delete cluster method over gRPC.
+
+        Deletes the cluster, including the Kubernetes
+        endpoint and all worker nodes.
+
+        Firewalls and routes that were configured during cluster
+        creation are also deleted.
+
+        Other Google Compute Engine resources that might be in
+        use by the cluster, such as load balancer resources, are
+        not deleted if they weren't present when the cluster was
+        initially created.
+
+        Returns:
+            Callable[[~.DeleteClusterRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'delete_cluster' not in self._stubs:
+            self._stubs['delete_cluster'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/DeleteCluster',
+                request_serializer=cluster_service.DeleteClusterRequest.serialize,
+                response_deserializer=cluster_service.Operation.deserialize,
+            )
+        return self._stubs['delete_cluster']
+
+    @property
+    def list_operations(self) -> Callable[
+            [cluster_service.ListOperationsRequest],
+            Awaitable[cluster_service.ListOperationsResponse]]:
+        r"""Return a callable for the list operations method over gRPC.
+
+        Lists all operations in a project in the specified
+        zone or all zones.
+
+        Returns:
+            Callable[[~.ListOperationsRequest],
+                    Awaitable[~.ListOperationsResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'list_operations' not in self._stubs:
+            self._stubs['list_operations'] = self.grpc_channel.unary_unary(
+                '/google.container.v1beta1.ClusterManager/ListOperations',
+                request_serializer=cluster_service.ListOperationsRequest.serialize,
+                response_deserializer=cluster_service.ListOperationsResponse.deserialize,
+            )
+        return self._stubs['list_operations']
+
+    @property
+    def get_operation(self) -> Callable[
+            [cluster_service.GetOperationRequest],
+            Awaitable[cluster_service.Operation]]:
+        r"""Return a callable for the get operation method over gRPC.
+
+        Gets the specified operation.
+
+        Returns:
+            Callable[[~.GetOperationRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_operation' not in self._stubs: + self._stubs['get_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetOperation', + request_serializer=cluster_service.GetOperationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['get_operation'] + + @property + def cancel_operation(self) -> Callable[ + [cluster_service.CancelOperationRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the cancel operation method over gRPC. + + Cancels the specified operation. + + Returns: + Callable[[~.CancelOperationRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'cancel_operation' not in self._stubs: + self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CancelOperation', + request_serializer=cluster_service.CancelOperationRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['cancel_operation'] + + @property + def get_server_config(self) -> Callable[ + [cluster_service.GetServerConfigRequest], + Awaitable[cluster_service.ServerConfig]]: + r"""Return a callable for the get server config method over gRPC. + + Returns configuration info about the Google + Kubernetes Engine service. + + Returns: + Callable[[~.GetServerConfigRequest], + Awaitable[~.ServerConfig]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_server_config' not in self._stubs: + self._stubs['get_server_config'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetServerConfig', + request_serializer=cluster_service.GetServerConfigRequest.serialize, + response_deserializer=cluster_service.ServerConfig.deserialize, + ) + return self._stubs['get_server_config'] + + @property + def list_node_pools(self) -> Callable[ + [cluster_service.ListNodePoolsRequest], + Awaitable[cluster_service.ListNodePoolsResponse]]: + r"""Return a callable for the list node pools method over gRPC. + + Lists the node pools for a cluster. + + Returns: + Callable[[~.ListNodePoolsRequest], + Awaitable[~.ListNodePoolsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'list_node_pools' not in self._stubs: + self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListNodePools', + request_serializer=cluster_service.ListNodePoolsRequest.serialize, + response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, + ) + return self._stubs['list_node_pools'] + + @property + def get_json_web_keys(self) -> Callable[ + [cluster_service.GetJSONWebKeysRequest], + Awaitable[cluster_service.GetJSONWebKeysResponse]]: + r"""Return a callable for the get json web keys method over gRPC. + + Gets the public component of the cluster signing keys + in JSON Web Key format. + This API is not yet intended for general use, and is not + available for all clusters. + + Returns: + Callable[[~.GetJSONWebKeysRequest], + Awaitable[~.GetJSONWebKeysResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_json_web_keys' not in self._stubs: + self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetJSONWebKeys', + request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, + response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, + ) + return self._stubs['get_json_web_keys'] + + @property + def get_node_pool(self) -> Callable[ + [cluster_service.GetNodePoolRequest], + Awaitable[cluster_service.NodePool]]: + r"""Return a callable for the get node pool method over gRPC. + + Retrieves the requested node pool. + + Returns: + Callable[[~.GetNodePoolRequest], + Awaitable[~.NodePool]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_node_pool' not in self._stubs: + self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/GetNodePool', + request_serializer=cluster_service.GetNodePoolRequest.serialize, + response_deserializer=cluster_service.NodePool.deserialize, + ) + return self._stubs['get_node_pool'] + + @property + def create_node_pool(self) -> Callable[ + [cluster_service.CreateNodePoolRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the create node pool method over gRPC. + + Creates a node pool for a cluster. + + Returns: + Callable[[~.CreateNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'create_node_pool' not in self._stubs: + self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CreateNodePool', + request_serializer=cluster_service.CreateNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['create_node_pool'] + + @property + def delete_node_pool(self) -> Callable[ + [cluster_service.DeleteNodePoolRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the delete node pool method over gRPC. + + Deletes a node pool from a cluster. + + Returns: + Callable[[~.DeleteNodePoolRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_node_pool' not in self._stubs: + self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/DeleteNodePool', + request_serializer=cluster_service.DeleteNodePoolRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['delete_node_pool'] + + @property + def rollback_node_pool_upgrade(self) -> Callable[ + [cluster_service.RollbackNodePoolUpgradeRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the rollback node pool upgrade method over gRPC. + + Rolls back a previously Aborted or Failed NodePool + upgrade. This makes no changes if the last upgrade + successfully completed. + + Returns: + Callable[[~.RollbackNodePoolUpgradeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'rollback_node_pool_upgrade' not in self._stubs: + self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade', + request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['rollback_node_pool_upgrade'] + + @property + def set_node_pool_management(self) -> Callable[ + [cluster_service.SetNodePoolManagementRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set node pool management method over gRPC. + + Sets the NodeManagement options for a node pool. + + Returns: + Callable[[~.SetNodePoolManagementRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'set_node_pool_management' not in self._stubs: + self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNodePoolManagement', + request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_management'] + + @property + def set_labels(self) -> Callable[ + [cluster_service.SetLabelsRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set labels method over gRPC. + + Sets labels on a cluster. + + Returns: + Callable[[~.SetLabelsRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_labels' not in self._stubs: + self._stubs['set_labels'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetLabels', + request_serializer=cluster_service.SetLabelsRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_labels'] + + @property + def set_legacy_abac(self) -> Callable[ + [cluster_service.SetLegacyAbacRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set legacy abac method over gRPC. + + Enables or disables the ABAC authorization mechanism + on a cluster. + + Returns: + Callable[[~.SetLegacyAbacRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_legacy_abac' not in self._stubs: + self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetLegacyAbac', + request_serializer=cluster_service.SetLegacyAbacRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_legacy_abac'] + + @property + def start_ip_rotation(self) -> Callable[ + [cluster_service.StartIPRotationRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the start ip rotation method over gRPC. + + Starts master IP rotation. + + Returns: + Callable[[~.StartIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'start_ip_rotation' not in self._stubs: + self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/StartIPRotation', + request_serializer=cluster_service.StartIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['start_ip_rotation'] + + @property + def complete_ip_rotation(self) -> Callable[ + [cluster_service.CompleteIPRotationRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the complete ip rotation method over gRPC. + + Completes master IP rotation. 
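+
+        For example (illustrative sketch only; assumes an async
+        transport bound as ``transport`` and a cluster whose rotation
+        was begun with ``start_ip_rotation``; the name is
+        hypothetical)::
+
+            request = cluster_service.CompleteIPRotationRequest(
+                name='projects/my-project/locations/us-central1-a/clusters/my-cluster',
+            )
+            operation = await transport.complete_ip_rotation(request)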
+ + Returns: + Callable[[~.CompleteIPRotationRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'complete_ip_rotation' not in self._stubs: + self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/CompleteIPRotation', + request_serializer=cluster_service.CompleteIPRotationRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['complete_ip_rotation'] + + @property + def set_node_pool_size(self) -> Callable[ + [cluster_service.SetNodePoolSizeRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set node pool size method over gRPC. + + Sets the size for a specific node pool. + + Returns: + Callable[[~.SetNodePoolSizeRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_node_pool_size' not in self._stubs: + self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNodePoolSize', + request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_node_pool_size'] + + @property + def set_network_policy(self) -> Callable[ + [cluster_service.SetNetworkPolicyRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set network policy method over gRPC. + + Enables or disables Network Policy for a cluster. + + Returns: + Callable[[~.SetNetworkPolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_network_policy' not in self._stubs: + self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetNetworkPolicy', + request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_network_policy'] + + @property + def set_maintenance_policy(self) -> Callable[ + [cluster_service.SetMaintenancePolicyRequest], + Awaitable[cluster_service.Operation]]: + r"""Return a callable for the set maintenance policy method over gRPC. + + Sets the maintenance policy for a cluster. + + Returns: + Callable[[~.SetMaintenancePolicyRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'set_maintenance_policy' not in self._stubs: + self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/SetMaintenancePolicy', + request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, + response_deserializer=cluster_service.Operation.deserialize, + ) + return self._stubs['set_maintenance_policy'] + + @property + def list_usable_subnetworks(self) -> Callable[ + [cluster_service.ListUsableSubnetworksRequest], + Awaitable[cluster_service.ListUsableSubnetworksResponse]]: + r"""Return a callable for the list usable subnetworks method over gRPC. + + Lists subnetworks that can be used for creating + clusters in a project. + + Returns: + Callable[[~.ListUsableSubnetworksRequest], + Awaitable[~.ListUsableSubnetworksResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_usable_subnetworks' not in self._stubs: + self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListUsableSubnetworks', + request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, + response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, + ) + return self._stubs['list_usable_subnetworks'] + + @property + def list_locations(self) -> Callable[ + [cluster_service.ListLocationsRequest], + Awaitable[cluster_service.ListLocationsResponse]]: + r"""Return a callable for the list locations method over gRPC. + + Fetches locations that offer Google Kubernetes + Engine. + + Returns: + Callable[[~.ListLocationsRequest], + Awaitable[~.ListLocationsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_locations' not in self._stubs: + self._stubs['list_locations'] = self.grpc_channel.unary_unary( + '/google.container.v1beta1.ClusterManager/ListLocations', + request_serializer=cluster_service.ListLocationsRequest.serialize, + response_deserializer=cluster_service.ListLocationsResponse.deserialize, + ) + return self._stubs['list_locations'] + + +__all__ = ( + 'ClusterManagerGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py new file mode 100644 index 00000000..6184d07a --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py @@ -0,0 +1,246 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
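The stub properties above only construct raw per-method gRPC callables; typical callers go through the generated async client, which owns one of these transports. A minimal sketch of driving the start_ip_rotation stub that way; the project, location, and cluster names are placeholders, and Application Default Credentials are assumed:

    import asyncio

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import cluster_service

    async def rotate_master_ip() -> cluster_service.Operation:
        # The client builds a ClusterManagerGrpcAsyncIOTransport internally;
        # its start_ip_rotation property (defined above) supplies the callable.
        client = ClusterManagerAsyncClient()
        request = cluster_service.StartIPRotationRequest(
            # Hypothetical resource name, not taken from this diff.
            name="projects/my-project/locations/us-central1/clusters/my-cluster",
        )
        return await client.start_ip_rotation(request=request)

    operation = asyncio.run(rotate_master_ip())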
+# +from .cluster_service import ( + AcceleratorConfig, + AddonsConfig, + AuthenticatorGroupsConfig, + AutoprovisioningNodePoolDefaults, + AutoUpgradeOptions, + BinaryAuthorization, + CancelOperationRequest, + ClientCertificateConfig, + CloudRunConfig, + Cluster, + ClusterAutoscaling, + ClusterTelemetry, + ClusterUpdate, + CompleteIPRotationRequest, + ConfidentialNodes, + ConfigConnectorConfig, + CreateClusterRequest, + CreateNodePoolRequest, + DailyMaintenanceWindow, + DatabaseEncryption, + DefaultSnatStatus, + DeleteClusterRequest, + DeleteNodePoolRequest, + DnsCacheConfig, + EphemeralStorageConfig, + GcePersistentDiskCsiDriverConfig, + GetClusterRequest, + GetJSONWebKeysRequest, + GetJSONWebKeysResponse, + GetNodePoolRequest, + GetOpenIDConfigRequest, + GetOpenIDConfigResponse, + GetOperationRequest, + GetServerConfigRequest, + HorizontalPodAutoscaling, + HttpLoadBalancing, + IntraNodeVisibilityConfig, + IPAllocationPolicy, + IstioConfig, + Jwk, + KalmConfig, + KubernetesDashboard, + LegacyAbac, + LinuxNodeConfig, + ListClustersRequest, + ListClustersResponse, + ListLocationsRequest, + ListLocationsResponse, + ListNodePoolsRequest, + ListNodePoolsResponse, + ListOperationsRequest, + ListOperationsResponse, + ListUsableSubnetworksRequest, + ListUsableSubnetworksResponse, + Location, + MaintenancePolicy, + MaintenanceWindow, + Master, + MasterAuth, + MasterAuthorizedNetworksConfig, + MaxPodsConstraint, + NetworkConfig, + NetworkPolicy, + NetworkPolicyConfig, + NodeConfig, + NodeKubeletConfig, + NodeManagement, + NodePool, + NodePoolAutoscaling, + NodeTaint, + NotificationConfig, + Operation, + OperationProgress, + PodSecurityPolicyConfig, + PrivateClusterConfig, + PrivateClusterMasterGlobalAccessConfig, + RecurringTimeWindow, + ReleaseChannel, + ReservationAffinity, + ResourceLimit, + ResourceUsageExportConfig, + RollbackNodePoolUpgradeRequest, + SandboxConfig, + ServerConfig, + SetAddonsConfigRequest, + SetLabelsRequest, + SetLegacyAbacRequest, + SetLocationsRequest, + SetLoggingServiceRequest, + SetMaintenancePolicyRequest, + SetMasterAuthRequest, + SetMonitoringServiceRequest, + SetNetworkPolicyRequest, + SetNodePoolAutoscalingRequest, + SetNodePoolManagementRequest, + SetNodePoolSizeRequest, + ShieldedInstanceConfig, + ShieldedNodes, + StartIPRotationRequest, + StatusCondition, + TimeWindow, + TpuConfig, + UpdateClusterRequest, + UpdateMasterRequest, + UpdateNodePoolRequest, + UpgradeEvent, + UsableSubnetwork, + UsableSubnetworkSecondaryRange, + VerticalPodAutoscaling, + WorkloadIdentityConfig, + WorkloadMetadataConfig, + DatapathProvider, + UpgradeResourceType, +) + +__all__ = ( + 'AcceleratorConfig', + 'AddonsConfig', + 'AuthenticatorGroupsConfig', + 'AutoprovisioningNodePoolDefaults', + 'AutoUpgradeOptions', + 'BinaryAuthorization', + 'CancelOperationRequest', + 'ClientCertificateConfig', + 'CloudRunConfig', + 'Cluster', + 'ClusterAutoscaling', + 'ClusterTelemetry', + 'ClusterUpdate', + 'CompleteIPRotationRequest', + 'ConfidentialNodes', + 'ConfigConnectorConfig', + 'CreateClusterRequest', + 'CreateNodePoolRequest', + 'DailyMaintenanceWindow', + 'DatabaseEncryption', + 'DefaultSnatStatus', + 'DeleteClusterRequest', + 'DeleteNodePoolRequest', + 'DnsCacheConfig', + 'EphemeralStorageConfig', + 'GcePersistentDiskCsiDriverConfig', + 'GetClusterRequest', + 'GetJSONWebKeysRequest', + 'GetJSONWebKeysResponse', + 'GetNodePoolRequest', + 'GetOpenIDConfigRequest', + 'GetOpenIDConfigResponse', + 'GetOperationRequest', + 'GetServerConfigRequest', + 'HorizontalPodAutoscaling', + 
'HttpLoadBalancing', + 'IntraNodeVisibilityConfig', + 'IPAllocationPolicy', + 'IstioConfig', + 'Jwk', + 'KalmConfig', + 'KubernetesDashboard', + 'LegacyAbac', + 'LinuxNodeConfig', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListLocationsRequest', + 'ListLocationsResponse', + 'ListNodePoolsRequest', + 'ListNodePoolsResponse', + 'ListOperationsRequest', + 'ListOperationsResponse', + 'ListUsableSubnetworksRequest', + 'ListUsableSubnetworksResponse', + 'Location', + 'MaintenancePolicy', + 'MaintenanceWindow', + 'Master', + 'MasterAuth', + 'MasterAuthorizedNetworksConfig', + 'MaxPodsConstraint', + 'NetworkConfig', + 'NetworkPolicy', + 'NetworkPolicyConfig', + 'NodeConfig', + 'NodeKubeletConfig', + 'NodeManagement', + 'NodePool', + 'NodePoolAutoscaling', + 'NodeTaint', + 'NotificationConfig', + 'Operation', + 'OperationProgress', + 'PodSecurityPolicyConfig', + 'PrivateClusterConfig', + 'PrivateClusterMasterGlobalAccessConfig', + 'RecurringTimeWindow', + 'ReleaseChannel', + 'ReservationAffinity', + 'ResourceLimit', + 'ResourceUsageExportConfig', + 'RollbackNodePoolUpgradeRequest', + 'SandboxConfig', + 'ServerConfig', + 'SetAddonsConfigRequest', + 'SetLabelsRequest', + 'SetLegacyAbacRequest', + 'SetLocationsRequest', + 'SetLoggingServiceRequest', + 'SetMaintenancePolicyRequest', + 'SetMasterAuthRequest', + 'SetMonitoringServiceRequest', + 'SetNetworkPolicyRequest', + 'SetNodePoolAutoscalingRequest', + 'SetNodePoolManagementRequest', + 'SetNodePoolSizeRequest', + 'ShieldedInstanceConfig', + 'ShieldedNodes', + 'StartIPRotationRequest', + 'StatusCondition', + 'TimeWindow', + 'TpuConfig', + 'UpdateClusterRequest', + 'UpdateMasterRequest', + 'UpdateNodePoolRequest', + 'UpgradeEvent', + 'UsableSubnetwork', + 'UsableSubnetworkSecondaryRange', + 'VerticalPodAutoscaling', + 'WorkloadIdentityConfig', + 'WorkloadMetadataConfig', + 'DatapathProvider', + 'UpgradeResourceType', +) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py b/owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py new file mode 100644 index 00000000..bec8d843 --- /dev/null +++ b/owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py @@ -0,0 +1,5866 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
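Since every message is re-exported through this __init__, request objects can be built from a single import instead of reaching into cluster_service directly. A minimal sketch; the parent string and cluster settings are placeholders:

    from google.container_v1beta1 import types

    # CreateClusterRequest and Cluster come from the re-export surface above.
    request = types.CreateClusterRequest(
        parent="projects/my-project/locations/us-central1",
        cluster=types.Cluster(
            name="example-cluster",
            initial_node_count=1,
        ),
    )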
+# +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore +from google.rpc import code_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.container.v1beta1', + manifest={ + 'DatapathProvider', + 'UpgradeResourceType', + 'LinuxNodeConfig', + 'NodeKubeletConfig', + 'NodeConfig', + 'ShieldedInstanceConfig', + 'SandboxConfig', + 'EphemeralStorageConfig', + 'ReservationAffinity', + 'NodeTaint', + 'MasterAuth', + 'ClientCertificateConfig', + 'AddonsConfig', + 'HttpLoadBalancing', + 'HorizontalPodAutoscaling', + 'KubernetesDashboard', + 'NetworkPolicyConfig', + 'DnsCacheConfig', + 'KalmConfig', + 'ConfigConnectorConfig', + 'GcePersistentDiskCsiDriverConfig', + 'PrivateClusterMasterGlobalAccessConfig', + 'PrivateClusterConfig', + 'IstioConfig', + 'CloudRunConfig', + 'MasterAuthorizedNetworksConfig', + 'LegacyAbac', + 'NetworkPolicy', + 'IPAllocationPolicy', + 'BinaryAuthorization', + 'PodSecurityPolicyConfig', + 'AuthenticatorGroupsConfig', + 'ClusterTelemetry', + 'Cluster', + 'ClusterUpdate', + 'Operation', + 'OperationProgress', + 'CreateClusterRequest', + 'GetClusterRequest', + 'UpdateClusterRequest', + 'UpdateNodePoolRequest', + 'SetNodePoolAutoscalingRequest', + 'SetLoggingServiceRequest', + 'SetMonitoringServiceRequest', + 'SetAddonsConfigRequest', + 'SetLocationsRequest', + 'UpdateMasterRequest', + 'SetMasterAuthRequest', + 'DeleteClusterRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'GetOperationRequest', + 'ListOperationsRequest', + 'CancelOperationRequest', + 'ListOperationsResponse', + 'GetServerConfigRequest', + 'ServerConfig', + 'CreateNodePoolRequest', + 'DeleteNodePoolRequest', + 'ListNodePoolsRequest', + 'GetNodePoolRequest', + 'NodePool', + 'NodeManagement', + 'AutoUpgradeOptions', + 'MaintenancePolicy', + 'MaintenanceWindow', + 'TimeWindow', + 'RecurringTimeWindow', + 'DailyMaintenanceWindow', + 'SetNodePoolManagementRequest', + 'SetNodePoolSizeRequest', + 'RollbackNodePoolUpgradeRequest', + 'ListNodePoolsResponse', + 'ClusterAutoscaling', + 'AutoprovisioningNodePoolDefaults', + 'ResourceLimit', + 'NodePoolAutoscaling', + 'SetLabelsRequest', + 'SetLegacyAbacRequest', + 'StartIPRotationRequest', + 'CompleteIPRotationRequest', + 'AcceleratorConfig', + 'WorkloadMetadataConfig', + 'SetNetworkPolicyRequest', + 'SetMaintenancePolicyRequest', + 'ListLocationsRequest', + 'ListLocationsResponse', + 'Location', + 'StatusCondition', + 'NetworkConfig', + 'ListUsableSubnetworksRequest', + 'ListUsableSubnetworksResponse', + 'UsableSubnetworkSecondaryRange', + 'UsableSubnetwork', + 'VerticalPodAutoscaling', + 'DefaultSnatStatus', + 'IntraNodeVisibilityConfig', + 'MaxPodsConstraint', + 'WorkloadIdentityConfig', + 'DatabaseEncryption', + 'ResourceUsageExportConfig', + 'ShieldedNodes', + 'GetOpenIDConfigRequest', + 'GetOpenIDConfigResponse', + 'GetJSONWebKeysRequest', + 'Jwk', + 'GetJSONWebKeysResponse', + 'ReleaseChannel', + 'TpuConfig', + 'Master', + 'NotificationConfig', + 'ConfidentialNodes', + 'UpgradeEvent', + }, +) + + +class DatapathProvider(proto.Enum): + r"""The datapath provider selects the implementation of the + Kubernetes networking model for service resolution and + network policy enforcement. + """ + DATAPATH_PROVIDER_UNSPECIFIED = 0 + LEGACY_DATAPATH = 1 + ADVANCED_DATAPATH = 2 + + +class UpgradeResourceType(proto.Enum): + r"""UpgradeResourceType is the resource type that is upgrading.
+ It is used in upgrade notifications. + """ + UPGRADE_RESOURCE_TYPE_UNSPECIFIED = 0 + MASTER = 1 + NODE_POOL = 2 + + +class LinuxNodeConfig(proto.Message): + r"""Parameters that can be configured on Linux nodes. + Attributes: + sysctls (Sequence[google.container_v1beta1.types.LinuxNodeConfig.SysctlsEntry]): + The Linux kernel parameters to be applied to the nodes and + all pods running on the nodes. + + The following parameters are supported. + + net.core.netdev_max_backlog net.core.rmem_max + net.core.wmem_default net.core.wmem_max net.core.optmem_max + net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem + net.ipv4.tcp_tw_reuse + """ + + sysctls = proto.MapField( + proto.STRING, + proto.STRING, + number=1, + ) + + +class NodeKubeletConfig(proto.Message): + r"""Node kubelet configs. + Attributes: + cpu_manager_policy (str): + Control the CPU management policy on the + node. See + https://kubernetes.io/docs/tasks/administer- + cluster/cpu-management-policies/ + The following values are allowed. + - "none": the default, which represents the + existing scheduling behavior. - "static": + allows pods with certain resource + characteristics to be granted + increased CPU affinity and exclusivity on the + node. The default value is 'none' if + unspecified. + cpu_cfs_quota (google.protobuf.wrappers_pb2.BoolValue): + Enable CPU CFS quota enforcement for + containers that specify CPU limits. + This option is enabled by default which makes + kubelet use CFS quota + (https://www.kernel.org/doc/Documentation/scheduler/sched- + bwc.txt) to enforce container CPU limits. + Otherwise, CPU limits will not be enforced at + all. + + Disable this option to mitigate CPU throttling + problems while still having your pods to be in + Guaranteed QoS class by specifying the CPU + limits. + The default value is 'true' if unspecified. + cpu_cfs_quota_period (str): + Set the CPU CFS quota period value 'cpu.cfs_period_us'. + + The string must be a sequence of decimal numbers, each with + optional fraction and a unit suffix, such as "300ms". Valid + time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + The value must be a positive duration. + """ + + cpu_manager_policy = proto.Field( + proto.STRING, + number=1, + ) + cpu_cfs_quota = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.BoolValue, + ) + cpu_cfs_quota_period = proto.Field( + proto.STRING, + number=3, + ) + + +class NodeConfig(proto.Message): + r"""Parameters that describe the nodes in a cluster. + Attributes: + machine_type (str): + The name of a Google Compute Engine `machine + type `__. + + If unspecified, the default machine type is ``e2-medium``. + disk_size_gb (int): + Size of the disk attached to each node, + specified in GB. The smallest allowed disk size + is 10GB. + If unspecified, the default disk size is 100GB. + oauth_scopes (Sequence[str]): + The set of Google API scopes to be made available on all of + the node VMs under the "default" service account. + + The following scopes are recommended, but not required, and + by default are not included: + + - ``https://www.googleapis.com/auth/compute`` is required + for mounting persistent storage on your nodes. + - ``https://www.googleapis.com/auth/devstorage.read_only`` + is required for communicating with **gcr.io** (the + `Google Container + Registry `__). + + If unspecified, no scopes are added, unless Cloud Logging or + Cloud Monitoring are enabled, in which case their required + scopes will be added. 
+ service_account (str): + The Google Cloud Platform Service Account to + be used by the node VMs. Specify the email + address of the Service Account; otherwise, if no + Service Account is specified, the "default" + service account is used. + metadata (Sequence[google.container_v1beta1.types.NodeConfig.MetadataEntry]): + The metadata key/value pairs assigned to instances in the + cluster. + + Keys must conform to the regexp ``[a-zA-Z0-9-_]+`` and be + less than 128 bytes in length. These are reflected as part + of a URL in the metadata server. Additionally, to avoid + ambiguity, keys must not conflict with any other metadata + keys for the project or be one of the reserved keys: + + - "cluster-location" + - "cluster-name" + - "cluster-uid" + - "configure-sh" + - "containerd-configure-sh" + - "enable-oslogin" + - "gci-ensure-gke-docker" + - "gci-metrics-enabled" + - "gci-update-strategy" + - "instance-template" + - "kube-env" + - "startup-script" + - "user-data" + - "disable-address-manager" + - "windows-startup-script-ps1" + - "common-psm1" + - "k8s-node-setup-psm1" + - "install-ssh-psm1" + - "user-profile-psm1" + + The following keys are reserved for Windows nodes: + + - "serial-port-logging-enable" + + Values are free-form strings, and only have meaning as + interpreted by the image running in the instance. The only + restriction placed on them is that each value's size must be + less than or equal to 32 KB. + + The total size of all keys and values must be less than 512 + KB. + image_type (str): + The image type to use for this node. Note + that for a given image type, the latest version + of it will be used. + labels (Sequence[google.container_v1beta1.types.NodeConfig.LabelsEntry]): + The map of Kubernetes labels (key/value + pairs) to be applied to each node. These will + be added in addition to any default label(s) that + Kubernetes may apply to the node. + In case of conflict in label keys, the applied + set may differ depending on the Kubernetes + version -- it's best to assume the behavior is + undefined and conflicts should be avoided. + For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/overview/working- with-objects/labels/ + local_ssd_count (int): + The number of local SSD disks to be attached + to the node. + The limit for this value is dependent upon the + maximum number of disks available on a machine + per zone. See: + https://cloud.google.com/compute/docs/disks/local- ssd for more information. + tags (Sequence[str]): + The list of instance tags applied to all + nodes. Tags are used to identify valid sources + or targets for network firewalls and are + specified by the client during cluster or node + pool creation. Each tag within the list must + comply with RFC1035. + preemptible (bool): + Whether the nodes are created as preemptible + VM instances. See: + https://cloud.google.com/compute/docs/instances/preemptible + for more information about preemptible VM + instances. + accelerators (Sequence[google.container_v1beta1.types.AcceleratorConfig]): + A list of hardware accelerators to be + attached to each node. See + https://cloud.google.com/compute/docs/gpus for + more information about support for GPUs. + sandbox_config (google.container_v1beta1.types.SandboxConfig): + Sandbox configuration for this node. + node_group (str): + Setting this field will assign instances of this pool to run + on the specified node group. This is useful for running + workloads on `sole tenant + nodes `__.
+ reservation_affinity (google.container_v1beta1.types.ReservationAffinity): + The optional reservation affinity. Setting this field will + apply the specified `Zonal Compute + Reservation `__ + to this node pool. + disk_type (str): + Type of the disk attached to each node (e.g. + 'pd-standard', 'pd-ssd' or 'pd-balanced') + + If unspecified, the default disk type is 'pd- + standard' + min_cpu_platform (str): + Minimum CPU platform to be used by this instance. The + instance may be scheduled on the specified or newer CPU + platform. Applicable values are the friendly names of CPU + platforms, such as ``minCpuPlatform: "Intel Haswell"`` or + ``minCpuPlatform: "Intel Sandy Bridge"``. For more + information, read `how to specify min CPU + platform `__ + workload_metadata_config (google.container_v1beta1.types.WorkloadMetadataConfig): + The workload metadata configuration for this + node. + taints (Sequence[google.container_v1beta1.types.NodeTaint]): + List of kubernetes taints to be applied to + each node. + For more information, including usage and the + valid values, see: + https://kubernetes.io/docs/concepts/configuration/taint- + and-toleration/ + boot_disk_kms_key (str): + The Customer Managed Encryption Key used to encrypt the boot + disk attached to each node in the node pool. This should be + of the form + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. + For more information about protecting resources with Cloud + KMS Keys please see: + https://cloud.google.com/compute/docs/disks/customer-managed-encryption + shielded_instance_config (google.container_v1beta1.types.ShieldedInstanceConfig): + Shielded Instance options. + linux_node_config (google.container_v1beta1.types.LinuxNodeConfig): + Parameters that can be configured on Linux + nodes. + kubelet_config (google.container_v1beta1.types.NodeKubeletConfig): + Node kubelet configs. + ephemeral_storage_config (google.container_v1beta1.types.EphemeralStorageConfig): + Parameters for the ephemeral storage + filesystem. If unspecified, ephemeral storage is + backed by the boot disk. 
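A minimal sketch of assembling a NodeConfig from these attributes; the machine type, scope, and taint values are illustrative only, and proto-plus is assumed to coerce the plain bool into the BoolValue wrapper:

    from google.container_v1beta1 import types

    node_config = types.NodeConfig(
        machine_type="e2-standard-4",    # illustrative machine type
        disk_size_gb=100,
        oauth_scopes=["https://www.googleapis.com/auth/cloud-platform"],
        preemptible=True,
        kubelet_config=types.NodeKubeletConfig(
            cpu_manager_policy="static",
            cpu_cfs_quota=True,          # plain bool, wrapped into BoolValue
            cpu_cfs_quota_period="100ms",
        ),
        taints=[
            types.NodeTaint(
                key="dedicated",
                value="gpu",
                effect=types.NodeTaint.Effect.NO_SCHEDULE,
            ),
        ],
    )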
+ """ + + machine_type = proto.Field( + proto.STRING, + number=1, + ) + disk_size_gb = proto.Field( + proto.INT32, + number=2, + ) + oauth_scopes = proto.RepeatedField( + proto.STRING, + number=3, + ) + service_account = proto.Field( + proto.STRING, + number=9, + ) + metadata = proto.MapField( + proto.STRING, + proto.STRING, + number=4, + ) + image_type = proto.Field( + proto.STRING, + number=5, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=6, + ) + local_ssd_count = proto.Field( + proto.INT32, + number=7, + ) + tags = proto.RepeatedField( + proto.STRING, + number=8, + ) + preemptible = proto.Field( + proto.BOOL, + number=10, + ) + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=11, + message='AcceleratorConfig', + ) + sandbox_config = proto.Field( + proto.MESSAGE, + number=17, + message='SandboxConfig', + ) + node_group = proto.Field( + proto.STRING, + number=18, + ) + reservation_affinity = proto.Field( + proto.MESSAGE, + number=19, + message='ReservationAffinity', + ) + disk_type = proto.Field( + proto.STRING, + number=12, + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=13, + ) + workload_metadata_config = proto.Field( + proto.MESSAGE, + number=14, + message='WorkloadMetadataConfig', + ) + taints = proto.RepeatedField( + proto.MESSAGE, + number=15, + message='NodeTaint', + ) + boot_disk_kms_key = proto.Field( + proto.STRING, + number=23, + ) + shielded_instance_config = proto.Field( + proto.MESSAGE, + number=20, + message='ShieldedInstanceConfig', + ) + linux_node_config = proto.Field( + proto.MESSAGE, + number=21, + message='LinuxNodeConfig', + ) + kubelet_config = proto.Field( + proto.MESSAGE, + number=22, + message='NodeKubeletConfig', + ) + ephemeral_storage_config = proto.Field( + proto.MESSAGE, + number=24, + message='EphemeralStorageConfig', + ) + + +class ShieldedInstanceConfig(proto.Message): + r"""A set of Shielded Instance options. + Attributes: + enable_secure_boot (bool): + Defines whether the instance has Secure Boot + enabled. + Secure Boot helps ensure that the system only + runs authentic software by verifying the digital + signature of all boot components, and halting + the boot process if signature verification + fails. + enable_integrity_monitoring (bool): + Defines whether the instance has integrity + monitoring enabled. + Enables monitoring and attestation of the boot + integrity of the instance. The attestation is + performed against the integrity policy baseline. + This baseline is initially derived from the + implicitly trusted boot image when the instance + is created. + """ + + enable_secure_boot = proto.Field( + proto.BOOL, + number=1, + ) + enable_integrity_monitoring = proto.Field( + proto.BOOL, + number=2, + ) + + +class SandboxConfig(proto.Message): + r"""SandboxConfig contains configurations of the sandbox to use + for the node. + + Attributes: + sandbox_type (str): + Type of the sandbox to use for the node (e.g. + 'gvisor') + type_ (google.container_v1beta1.types.SandboxConfig.Type): + Type of the sandbox to use for the node. + """ + class Type(proto.Enum): + r"""Possible types of sandboxes.""" + UNSPECIFIED = 0 + GVISOR = 1 + + sandbox_type = proto.Field( + proto.STRING, + number=1, + ) + type_ = proto.Field( + proto.ENUM, + number=2, + enum=Type, + ) + + +class EphemeralStorageConfig(proto.Message): + r"""EphemeralStorageConfig contains configuration for the + ephemeral storage filesystem. + + Attributes: + local_ssd_count (int): + Number of local SSDs to use to back ephemeral + storage. 
Uses NVMe interfaces. Each local SSD is + 375 GB in size. If zero, it means to disable + using local SSDs as ephemeral storage. + """ + + local_ssd_count = proto.Field( + proto.INT32, + number=1, + ) + + +class ReservationAffinity(proto.Message): + r"""`ReservationAffinity `__ + is the configuration of desired reservation which instances could + take capacity from. + + Attributes: + consume_reservation_type (google.container_v1beta1.types.ReservationAffinity.Type): + Corresponds to the type of reservation + consumption. + key (str): + Corresponds to the label key of a reservation resource. To + target a SPECIFIC_RESERVATION by name, specify + "googleapis.com/reservation-name" as the key and specify the + name of your reservation as its value. + values (Sequence[str]): + Corresponds to the label value(s) of + reservation resource(s). + """ + class Type(proto.Enum): + r"""Indicates whether to consume capacity from a reservation or + not. + """ + UNSPECIFIED = 0 + NO_RESERVATION = 1 + ANY_RESERVATION = 2 + SPECIFIC_RESERVATION = 3 + + consume_reservation_type = proto.Field( + proto.ENUM, + number=1, + enum=Type, + ) + key = proto.Field( + proto.STRING, + number=2, + ) + values = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class NodeTaint(proto.Message): + r"""Kubernetes taint is comprised of three fields: key, value, and + effect. Effect can only be one of three types: NoSchedule, + PreferNoSchedule or NoExecute. + + See + `here `__ + for more information, including usage and the valid values. + + Attributes: + key (str): + Key for taint. + value (str): + Value for taint. + effect (google.container_v1beta1.types.NodeTaint.Effect): + Effect for taint. + """ + class Effect(proto.Enum): + r"""Possible values for Effect in taint.""" + EFFECT_UNSPECIFIED = 0 + NO_SCHEDULE = 1 + PREFER_NO_SCHEDULE = 2 + NO_EXECUTE = 3 + + key = proto.Field( + proto.STRING, + number=1, + ) + value = proto.Field( + proto.STRING, + number=2, + ) + effect = proto.Field( + proto.ENUM, + number=3, + enum=Effect, + ) + + +class MasterAuth(proto.Message): + r"""The authentication information for accessing the master + endpoint. Authentication can be done using HTTP basic auth or + using client certificates. + + Attributes: + username (str): + The username to use for HTTP basic + authentication to the master endpoint. For + clusters v1.6.0 and later, basic authentication + can be disabled by leaving username unspecified + (or setting it to the empty string). + Warning: basic authentication is deprecated, and + will be removed in GKE control plane versions + 1.19 and newer. For a list of recommended + authentication methods, see: + https://cloud.google.com/kubernetes- + engine/docs/how-to/api-server-authentication + password (str): + The password to use for HTTP basic + authentication to the master endpoint. Because + the master endpoint is open to the Internet, you + should create a strong password. If a password + is provided for cluster creation, username must + be non-empty. + + Warning: basic authentication is deprecated, and + will be removed in GKE control plane versions + 1.19 and newer. For a list of recommended + authentication methods, see: + https://cloud.google.com/kubernetes- + engine/docs/how-to/api-server-authentication + client_certificate_config (google.container_v1beta1.types.ClientCertificateConfig): + Configuration for client certificate + authentication on the cluster. For clusters + before v1.12, if no configuration is specified, + a client certificate is issued. 
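As the attribute descriptions here note, basic authentication is deprecated and certificate issuance is optional on newer clusters. A minimal sketch of a MasterAuth that declines both, shown before the remaining output-only attributes; values are illustrative:

    from google.container_v1beta1 import types

    master_auth = types.MasterAuth(
        username="",  # an empty username disables basic authentication
        client_certificate_config=types.ClientCertificateConfig(
            issue_client_certificate=False,  # do not issue a client cert
        ),
    )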
+ cluster_ca_certificate (str): + [Output only] Base64-encoded public certificate + that is the root of trust for the cluster. + client_certificate (str): + [Output only] Base64-encoded public certificate used by + clients to authenticate to the cluster endpoint. + client_key (str): + [Output only] Base64-encoded private key used by clients to + authenticate to the cluster endpoint. + """ + + username = proto.Field( + proto.STRING, + number=1, + ) + password = proto.Field( + proto.STRING, + number=2, + ) + client_certificate_config = proto.Field( + proto.MESSAGE, + number=3, + message='ClientCertificateConfig', + ) + cluster_ca_certificate = proto.Field( + proto.STRING, + number=100, + ) + client_certificate = proto.Field( + proto.STRING, + number=101, + ) + client_key = proto.Field( + proto.STRING, + number=102, + ) + + +class ClientCertificateConfig(proto.Message): + r"""Configuration for client certificates on the cluster. + Attributes: + issue_client_certificate (bool): + Issue a client certificate. + """ + + issue_client_certificate = proto.Field( + proto.BOOL, + number=1, + ) + + +class AddonsConfig(proto.Message): + r"""Configuration for the addons that can be automatically spun + up in the cluster, enabling additional functionality. + + Attributes: + http_load_balancing (google.container_v1beta1.types.HttpLoadBalancing): + Configuration for the HTTP (L7) load + balancing controller addon, which makes it easy + to set up HTTP load balancers for services in a + cluster. + horizontal_pod_autoscaling (google.container_v1beta1.types.HorizontalPodAutoscaling): + Configuration for the horizontal pod + autoscaling feature, which increases or + decreases the number of replica pods a + replication controller has based on the resource + usage of the existing pods. + kubernetes_dashboard (google.container_v1beta1.types.KubernetesDashboard): + Configuration for the Kubernetes Dashboard. + This addon is deprecated, and will be disabled + in 1.15. It is recommended to use the Cloud + Console to manage and monitor your Kubernetes + clusters, workloads and applications. For more + information, see: + https://cloud.google.com/kubernetes- engine/docs/concepts/dashboards + network_policy_config (google.container_v1beta1.types.NetworkPolicyConfig): + Configuration for NetworkPolicy. This only + tracks whether the addon is enabled or not on + the Master, it does not track whether network + policy is enabled for the nodes. + istio_config (google.container_v1beta1.types.IstioConfig): + Configuration for Istio, an open platform to + connect, manage, and secure microservices. + cloud_run_config (google.container_v1beta1.types.CloudRunConfig): + Configuration for the Cloud Run addon. The ``IstioConfig`` + addon must be enabled in order to enable the Cloud Run + addon. This option can only be enabled at cluster creation + time. + dns_cache_config (google.container_v1beta1.types.DnsCacheConfig): + Configuration for NodeLocalDNS, a DNS cache + running on cluster nodes + config_connector_config (google.container_v1beta1.types.ConfigConnectorConfig): + Configuration for the ConfigConnector add-on, + a Kubernetes extension to manage hosted GCP + services through the Kubernetes API + gce_persistent_disk_csi_driver_config (google.container_v1beta1.types.GcePersistentDiskCsiDriverConfig): + Configuration for the Compute Engine + Persistent Disk CSI driver. + kalm_config (google.container_v1beta1.types.KalmConfig): + Configuration for the KALM addon, which + manages the lifecycle of k8s applications.
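Each addon above is its own small message hung off AddonsConfig. A minimal sketch enabling a few of them; the selection is illustrative, and note the inverted polarity between ``disabled`` and ``enabled`` fields:

    from google.container_v1beta1 import types

    addons = types.AddonsConfig(
        # These two addons expose a `disabled` flag, so False turns them on.
        http_load_balancing=types.HttpLoadBalancing(disabled=False),
        horizontal_pod_autoscaling=types.HorizontalPodAutoscaling(disabled=False),
        # These addons expose an `enabled` flag instead.
        dns_cache_config=types.DnsCacheConfig(enabled=True),
        config_connector_config=types.ConfigConnectorConfig(enabled=True),
    )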
+ """ + + http_load_balancing = proto.Field( + proto.MESSAGE, + number=1, + message='HttpLoadBalancing', + ) + horizontal_pod_autoscaling = proto.Field( + proto.MESSAGE, + number=2, + message='HorizontalPodAutoscaling', + ) + kubernetes_dashboard = proto.Field( + proto.MESSAGE, + number=3, + message='KubernetesDashboard', + ) + network_policy_config = proto.Field( + proto.MESSAGE, + number=4, + message='NetworkPolicyConfig', + ) + istio_config = proto.Field( + proto.MESSAGE, + number=5, + message='IstioConfig', + ) + cloud_run_config = proto.Field( + proto.MESSAGE, + number=7, + message='CloudRunConfig', + ) + dns_cache_config = proto.Field( + proto.MESSAGE, + number=8, + message='DnsCacheConfig', + ) + config_connector_config = proto.Field( + proto.MESSAGE, + number=10, + message='ConfigConnectorConfig', + ) + gce_persistent_disk_csi_driver_config = proto.Field( + proto.MESSAGE, + number=11, + message='GcePersistentDiskCsiDriverConfig', + ) + kalm_config = proto.Field( + proto.MESSAGE, + number=12, + message='KalmConfig', + ) + + +class HttpLoadBalancing(proto.Message): + r"""Configuration options for the HTTP (L7) load balancing + controller addon, which makes it easy to set up HTTP load + balancers for services in a cluster. + + Attributes: + disabled (bool): + Whether the HTTP Load Balancing controller is + enabled in the cluster. When enabled, it runs a + small pod in the cluster that manages the load + balancers. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class HorizontalPodAutoscaling(proto.Message): + r"""Configuration options for the horizontal pod autoscaling + feature, which increases or decreases the number of replica pods + a replication controller has based on the resource usage of the + existing pods. + + Attributes: + disabled (bool): + Whether the Horizontal Pod Autoscaling + feature is enabled in the cluster. When enabled, + it ensures that metrics are collected into + Stackdriver Monitoring. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class KubernetesDashboard(proto.Message): + r"""Configuration for the Kubernetes Dashboard. + Attributes: + disabled (bool): + Whether the Kubernetes Dashboard is enabled + for this cluster. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class NetworkPolicyConfig(proto.Message): + r"""Configuration for NetworkPolicy. This only tracks whether the + addon is enabled or not on the Master, it does not track whether + network policy is enabled for the nodes. + + Attributes: + disabled (bool): + Whether NetworkPolicy is enabled for this + cluster. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class DnsCacheConfig(proto.Message): + r"""Configuration for NodeLocal DNSCache + Attributes: + enabled (bool): + Whether NodeLocal DNSCache is enabled for + this cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class KalmConfig(proto.Message): + r"""Configuration options for the KALM addon. + Attributes: + enabled (bool): + Whether KALM is enabled for this cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class ConfigConnectorConfig(proto.Message): + r"""Configuration options for the Config Connector add-on. + Attributes: + enabled (bool): + Whether Cloud Connector is enabled for this + cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class GcePersistentDiskCsiDriverConfig(proto.Message): + r"""Configuration for the Compute Engine PD CSI driver. 
This + option can only be enabled at cluster creation time. + + Attributes: + enabled (bool): + Whether the Compute Engine PD CSI driver is + enabled for this cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class PrivateClusterMasterGlobalAccessConfig(proto.Message): + r"""Configuration for controlling master global access settings. + Attributes: + enabled (bool): + Whether the master is accessible globally or + not. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class PrivateClusterConfig(proto.Message): + r"""Configuration options for private clusters. + Attributes: + enable_private_nodes (bool): + Whether nodes have internal IP addresses + only. If enabled, all nodes are given only RFC + 1918 private addresses and communicate with the + master via private networking. + enable_private_endpoint (bool): + Whether the master's internal IP address is + used as the cluster endpoint. + master_ipv4_cidr_block (str): + The IP range in CIDR notation to use for the + hosted master network. This range will be used + for assigning internal IP addresses to the + master or set of masters, as well as the ILB + VIP. This range must not overlap with any other + ranges in use within the cluster's network. + private_endpoint (str): + Output only. The internal IP address of this + cluster's master endpoint. + public_endpoint (str): + Output only. The external IP address of this + cluster's master endpoint. + peering_name (str): + Output only. The peering name in the customer + VPC used by this cluster. + master_global_access_config (google.container_v1beta1.types.PrivateClusterMasterGlobalAccessConfig): + Controls master global access settings. + """ + + enable_private_nodes = proto.Field( + proto.BOOL, + number=1, + ) + enable_private_endpoint = proto.Field( + proto.BOOL, + number=2, + ) + master_ipv4_cidr_block = proto.Field( + proto.STRING, + number=3, + ) + private_endpoint = proto.Field( + proto.STRING, + number=4, + ) + public_endpoint = proto.Field( + proto.STRING, + number=5, + ) + peering_name = proto.Field( + proto.STRING, + number=7, + ) + master_global_access_config = proto.Field( + proto.MESSAGE, + number=8, + message='PrivateClusterMasterGlobalAccessConfig', + ) + + +class IstioConfig(proto.Message): + r"""Configuration options for Istio addon. + Attributes: + disabled (bool): + Whether Istio is enabled for this cluster. + auth (google.container_v1beta1.types.IstioConfig.IstioAuthMode): + The specified Istio auth mode, either none, + or mutual TLS. + """ + class IstioAuthMode(proto.Enum): + r"""Istio auth mode, + https://istio.io/docs/concepts/security/mutual-tls.html + """ + AUTH_NONE = 0 + AUTH_MUTUAL_TLS = 1 + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + auth = proto.Field( + proto.ENUM, + number=2, + enum=IstioAuthMode, + ) + + +class CloudRunConfig(proto.Message): + r"""Configuration options for the Cloud Run feature. + Attributes: + disabled (bool): + Whether Cloud Run addon is enabled for this + cluster. + load_balancer_type (google.container_v1beta1.types.CloudRunConfig.LoadBalancerType): + Which load balancer type is installed for + Cloud Run.
+ """ + class LoadBalancerType(proto.Enum): + r"""Load balancer type of ingress service of Cloud Run.""" + LOAD_BALANCER_TYPE_UNSPECIFIED = 0 + LOAD_BALANCER_TYPE_EXTERNAL = 1 + LOAD_BALANCER_TYPE_INTERNAL = 2 + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + load_balancer_type = proto.Field( + proto.ENUM, + number=3, + enum=LoadBalancerType, + ) + + +class MasterAuthorizedNetworksConfig(proto.Message): + r"""Configuration options for the master authorized networks + feature. Enabled master authorized networks will disallow all + external traffic to access Kubernetes master through HTTPS + except traffic from the given CIDR blocks, Google Compute Engine + Public IPs and Google Prod IPs. + + Attributes: + enabled (bool): + Whether or not master authorized networks is + enabled. + cidr_blocks (Sequence[google.container_v1beta1.types.MasterAuthorizedNetworksConfig.CidrBlock]): + cidr_blocks define up to 10 external networks that could + access Kubernetes master through HTTPS. + """ + + class CidrBlock(proto.Message): + r"""CidrBlock contains an optional name and one CIDR block. + Attributes: + display_name (str): + display_name is an optional field for users to identify CIDR + blocks. + cidr_block (str): + cidr_block must be specified in CIDR notation. + """ + + display_name = proto.Field( + proto.STRING, + number=1, + ) + cidr_block = proto.Field( + proto.STRING, + number=2, + ) + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + cidr_blocks = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=CidrBlock, + ) + + +class LegacyAbac(proto.Message): + r"""Configuration for the legacy Attribute Based Access Control + authorization mode. + + Attributes: + enabled (bool): + Whether the ABAC authorizer is enabled for + this cluster. When enabled, identities in the + system, including service accounts, nodes, and + controllers, will have statically granted + permissions beyond those provided by the RBAC + configuration or IAM. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class NetworkPolicy(proto.Message): + r"""Configuration options for the NetworkPolicy feature. + https://kubernetes.io/docs/concepts/services- + networking/networkpolicies/ + + Attributes: + provider (google.container_v1beta1.types.NetworkPolicy.Provider): + The selected network policy provider. + enabled (bool): + Whether network policy is enabled on the + cluster. + """ + class Provider(proto.Enum): + r"""Allowed Network Policy providers.""" + PROVIDER_UNSPECIFIED = 0 + CALICO = 1 + + provider = proto.Field( + proto.ENUM, + number=1, + enum=Provider, + ) + enabled = proto.Field( + proto.BOOL, + number=2, + ) + + +class IPAllocationPolicy(proto.Message): + r"""Configuration for controlling how IPs are allocated in the + cluster. + + Attributes: + use_ip_aliases (bool): + Whether alias IPs will be used for pod IPs in the cluster. + This is used in conjunction with use_routes. It cannot be + true if use_routes is true. If both use_ip_aliases and + use_routes are false, then the server picks the default IP + allocation mode + create_subnetwork (bool): + Whether a new subnetwork will be created automatically for + the cluster. + + This field is only applicable when ``use_ip_aliases`` is + true. + subnetwork_name (str): + A custom subnetwork name to be used if ``create_subnetwork`` + is true. If this field is empty, then an automatic name will + be chosen for the new subnetwork. + cluster_ipv4_cidr (str): + This field is deprecated, use cluster_ipv4_cidr_block. 
+ node_ipv4_cidr (str): + This field is deprecated, use node_ipv4_cidr_block. + services_ipv4_cidr (str): + This field is deprecated, use services_ipv4_cidr_block. + cluster_secondary_range_name (str): + The name of the secondary range to be used for the cluster + CIDR block. The secondary range will be used for pod IP + addresses. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases and + create_subnetwork is false. + services_secondary_range_name (str): + The name of the secondary range to be used for the + services CIDR block. The secondary range will be used for + service ClusterIPs. This must be an existing secondary range + associated with the cluster subnetwork. + + This field is only applicable with use_ip_aliases and + create_subnetwork is false. + cluster_ipv4_cidr_block (str): + The IP address range for the cluster pod IPs. If this field + is set, then ``cluster.cluster_ipv4_cidr`` must be left + blank. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + node_ipv4_cidr_block (str): + The IP address range of the instance IPs in this cluster. + + This is applicable only if ``create_subnetwork`` is true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + services_ipv4_cidr_block (str): + The IP address range of the services IPs in this cluster. If + blank, a range will be automatically chosen with the default + size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + Set to blank to have a range chosen with the default size. + + Set to /netmask (e.g. ``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. + allow_route_overlap (bool): + If true, allow allocation of cluster CIDR ranges that + overlap with certain kinds of network routes. By default we + do not allow cluster CIDR ranges to intersect with any user + declared routes. With allow_route_overlap == true, we allow + overlapping with CIDR ranges that are larger than the + cluster CIDR range. + + If this field is set to true, then cluster and services + CIDRs must be fully-specified (e.g. ``10.96.0.0/14``, but + not ``/14``), which means: + + 1) When ``use_ip_aliases`` is true, + ``cluster_ipv4_cidr_block`` and + ``services_ipv4_cidr_block`` must be fully-specified. + 2) When ``use_ip_aliases`` is false, + ``cluster.cluster_ipv4_cidr`` must be fully-specified. + tpu_ipv4_cidr_block (str): + The IP address range of the Cloud TPUs in this cluster. If + unspecified, a range will be automatically chosen with the + default size. + + This field is only applicable when ``use_ip_aliases`` is + true. + + If unspecified, the range will use the default size. + + Set to /netmask (e.g.
``/14``) to have a range chosen with a + specific netmask. + + Set to a + `CIDR `__ + notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private + networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, + ``192.168.0.0/16``) to pick a specific range to use. This + field is deprecated, use cluster.tpu_config.ipv4_cidr_block + instead. + use_routes (bool): + Whether routes will be used for pod IPs in the cluster. This + is used in conjunction with use_ip_aliases. It cannot be + true if use_ip_aliases is true. If both use_ip_aliases and + use_routes are false, then the server picks the default IP + allocation mode + """ + + use_ip_aliases = proto.Field( + proto.BOOL, + number=1, + ) + create_subnetwork = proto.Field( + proto.BOOL, + number=2, + ) + subnetwork_name = proto.Field( + proto.STRING, + number=3, + ) + cluster_ipv4_cidr = proto.Field( + proto.STRING, + number=4, + ) + node_ipv4_cidr = proto.Field( + proto.STRING, + number=5, + ) + services_ipv4_cidr = proto.Field( + proto.STRING, + number=6, + ) + cluster_secondary_range_name = proto.Field( + proto.STRING, + number=7, + ) + services_secondary_range_name = proto.Field( + proto.STRING, + number=8, + ) + cluster_ipv4_cidr_block = proto.Field( + proto.STRING, + number=9, + ) + node_ipv4_cidr_block = proto.Field( + proto.STRING, + number=10, + ) + services_ipv4_cidr_block = proto.Field( + proto.STRING, + number=11, + ) + allow_route_overlap = proto.Field( + proto.BOOL, + number=12, + ) + tpu_ipv4_cidr_block = proto.Field( + proto.STRING, + number=13, + ) + use_routes = proto.Field( + proto.BOOL, + number=15, + ) + + +class BinaryAuthorization(proto.Message): + r"""Configuration for Binary Authorization. + Attributes: + enabled (bool): + Enable Binary Authorization for this cluster. + If enabled, all container images will be + validated by Google Binauthz. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class PodSecurityPolicyConfig(proto.Message): + r"""Configuration for the PodSecurityPolicy feature. + Attributes: + enabled (bool): + Enable the PodSecurityPolicy controller for + this cluster. If enabled, pods must be valid + under a PodSecurityPolicy to be created. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class AuthenticatorGroupsConfig(proto.Message): + r"""Configuration for returning group information from + authenticators. + + Attributes: + enabled (bool): + Whether this cluster should return group + membership lookups during authentication using a + group of security groups. + security_group (str): + The name of the security group-of-groups to + be used. Only relevant if enabled = true. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + security_group = proto.Field( + proto.STRING, + number=2, + ) + + +class ClusterTelemetry(proto.Message): + r"""Telemetry integration for the cluster. + Attributes: + type_ (google.container_v1beta1.types.ClusterTelemetry.Type): + Type of the integration. + """ + class Type(proto.Enum): + r"""Type of the integration.""" + UNSPECIFIED = 0 + DISABLED = 1 + ENABLED = 2 + SYSTEM_ONLY = 3 + + type_ = proto.Field( + proto.ENUM, + number=1, + enum=Type, + ) + + +class Cluster(proto.Message): + r"""A Google Kubernetes Engine cluster. + Attributes: + name (str): + The name of this cluster. The name must be unique within + this project and location (e.g. zone or region), and can be + up to 40 characters with the following restrictions: + + - Lowercase letters, numbers, and hyphens only. + - Must start with a letter. + - Must end with a number or a letter. 
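A minimal sketch of the VPC-native allocation described by IPAllocationPolicy above, using the netmask-only form so the server picks the actual ranges; the sizes are illustrative:

    from google.container_v1beta1 import types

    ip_policy = types.IPAllocationPolicy(
        use_ip_aliases=True,               # mutually exclusive with use_routes
        cluster_ipv4_cidr_block="/14",     # netmask only: server chooses range
        services_ipv4_cidr_block="/20",
    )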
+ description (str): + An optional description of this cluster. + initial_node_count (int): + The number of nodes to create in this cluster. You must + ensure that your Compute Engine `resource + quota `__ is + sufficient for this number of instances. You must also have + available firewall and routes quota. For requests, this + field should only be used in lieu of a "node_pool" object, + since this configuration (along with the "node_config") will + be used to create a "NodePool" object with an auto-generated + name. Do not use this and a node_pool at the same time. + + This field is deprecated, use node_pool.initial_node_count + instead. + node_config (google.container_v1beta1.types.NodeConfig): + Parameters used in creating the cluster's nodes. For + requests, this field should only be used in lieu of a + "node_pool" object, since this configuration (along with the + "initial_node_count") will be used to create a "NodePool" + object with an auto-generated name. Do not use this and a + node_pool at the same time. For responses, this field will + be populated with the node configuration of the first node + pool. (For configuration of each node pool, see + ``node_pool.config``) + + If unspecified, the defaults are used. This field is + deprecated, use node_pool.config instead. + master_auth (google.container_v1beta1.types.MasterAuth): + The authentication information for accessing the master + endpoint. If unspecified, the defaults are used: For + clusters before v1.12, if master_auth is unspecified, + ``username`` will be set to "admin", a random password will + be generated, and a client certificate will be issued. + logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud Logging + service with a Kubernetes-native resource model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be used + for GKE 1.14+ or ``logging.googleapis.com`` for earlier + versions. + monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE 1.15). + - ``none`` - No metrics will be exported from the cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``monitoring.googleapis.com`` for + earlier versions. + network (str): + The name of the Google Compute Engine + `network `__ + to which the cluster is connected. If left unspecified, the + ``default`` network will be used. On output this shows the + network ID instead of the name. + cluster_ipv4_cidr (str): + The IP address range of the container pods in this cluster, + in + `CIDR `__ + notation (e.g. ``10.96.0.0/14``). Leave blank to have one + automatically chosen or specify a ``/14`` block in + ``10.0.0.0/8``. + addons_config (google.container_v1beta1.types.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + subnetwork (str): + The name of the Google Compute Engine + `subnetwork `__ + to which the cluster is connected. 
On output this shows the + subnetwork ID instead of the name. + node_pools (Sequence[google.container_v1beta1.types.NodePool]): + The node pools associated with this cluster. This field + should not be set if "node_config" or "initial_node_count" + are specified. + locations (Sequence[str]): + The list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. + + This field provides a default value if + `NodePool.Locations `__ + are not specified during node pool creation. + + Warning: changing cluster locations will update the + `NodePool.Locations `__ + of all node pools and will result in nodes being added + and/or removed. + enable_kubernetes_alpha (bool): + Kubernetes alpha features are enabled on this + cluster. This includes alpha API groups (e.g. + v1beta1) and features that may not be production + ready in the kubernetes version of the master + and nodes. The cluster has no SLA for uptime and + master/node upgrades are disabled. Alpha enabled + clusters are automatically deleted thirty days + after creation. + resource_labels (Sequence[google.container_v1beta1.types.Cluster.ResourceLabelsEntry]): + The resource labels for the cluster to use to + annotate any related Google Compute Engine + resources. + label_fingerprint (str): + The fingerprint of the set of labels for this + cluster. + legacy_abac (google.container_v1beta1.types.LegacyAbac): + Configuration for the legacy ABAC + authorization mode. + network_policy (google.container_v1beta1.types.NetworkPolicy): + Configuration options for the NetworkPolicy + feature. + ip_allocation_policy (google.container_v1beta1.types.IPAllocationPolicy): + Configuration for cluster IP allocation. + master_authorized_networks_config (google.container_v1beta1.types.MasterAuthorizedNetworksConfig): + The configuration options for master + authorized networks feature. + maintenance_policy (google.container_v1beta1.types.MaintenancePolicy): + Configure the maintenance policy for this + cluster. + binary_authorization (google.container_v1beta1.types.BinaryAuthorization): + Configuration for Binary Authorization. + pod_security_policy_config (google.container_v1beta1.types.PodSecurityPolicyConfig): + Configuration for the PodSecurityPolicy + feature. + autoscaling (google.container_v1beta1.types.ClusterAutoscaling): + Cluster-level autoscaling configuration. + network_config (google.container_v1beta1.types.NetworkConfig): + Configuration for cluster networking. + private_cluster (bool): + If this is a private cluster setup. Private clusters are + clusters that, by default have no external IP addresses on + the nodes and where nodes and the master communicate over + private IP addresses. This field is deprecated, use + private_cluster_config.enable_private_nodes instead. + master_ipv4_cidr_block (str): + The IP prefix in CIDR notation to use for the hosted master + network. This prefix will be used for assigning private IP + addresses to the master or set of masters, as well as the + ILB VIP. This field is deprecated, use + private_cluster_config.master_ipv4_cidr_block instead. + default_max_pods_constraint (google.container_v1beta1.types.MaxPodsConstraint): + The default constraint on the maximum number + of pods that can be run simultaneously on a node + in the node pool of this cluster. Only honored + if cluster created with IP Alias support. + resource_usage_export_config (google.container_v1beta1.types.ResourceUsageExportConfig): + Configuration for exporting resource usages. 
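Combining the ``ip_allocation_policy`` field above with the ``IPAllocationPolicy`` message defined earlier in this file, a VPC-native cluster sketch might look like the following (the secondary range names are hypothetical; assumes the staged ``google.container_v1beta1`` import path):

import google.container_v1beta1 as container

cluster = container.Cluster(
    name="vpc-native-cluster",
    initial_node_count=3,
    ip_allocation_policy=container.IPAllocationPolicy(
        # use_ip_aliases and use_routes are mutually exclusive, per the
        # field documentation above.
        use_ip_aliases=True,
        cluster_secondary_range_name="pods",       # hypothetical range name
        services_secondary_range_name="services",  # hypothetical range name
    ),
)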
+ Resource usage export is disabled when this + config unspecified. + authenticator_groups_config (google.container_v1beta1.types.AuthenticatorGroupsConfig): + Configuration controlling RBAC group + membership information. + private_cluster_config (google.container_v1beta1.types.PrivateClusterConfig): + Configuration for private cluster. + vertical_pod_autoscaling (google.container_v1beta1.types.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + shielded_nodes (google.container_v1beta1.types.ShieldedNodes): + Shielded Nodes configuration. + release_channel (google.container_v1beta1.types.ReleaseChannel): + Release channel configuration. + workload_identity_config (google.container_v1beta1.types.WorkloadIdentityConfig): + Configuration for the use of Kubernetes + Service Accounts in GCP IAM policies. + cluster_telemetry (google.container_v1beta1.types.ClusterTelemetry): + Telemetry integration for the cluster. + tpu_config (google.container_v1beta1.types.TpuConfig): + Configuration for Cloud TPU support; + notification_config (google.container_v1beta1.types.NotificationConfig): + Notification configuration of the cluster. + confidential_nodes (google.container_v1beta1.types.ConfidentialNodes): + Configuration of Confidential Nodes + self_link (str): + [Output only] Server-defined URL for the resource. + zone (str): + [Output only] The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field is deprecated, use + location instead. + endpoint (str): + [Output only] The IP address of this cluster's master + endpoint. The endpoint can be accessed from the internet at + ``https://username:password@endpoint/``. + + See the ``masterAuth`` property of this resource for + username and password information. + initial_cluster_version (str): + The initial Kubernetes version for this + cluster. Valid versions are those found in + validMasterVersions returned by getServerConfig. + The version can be upgraded over time; such + upgrades are reflected in currentMasterVersion + and currentNodeVersion. + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "","-": picks the default + Kubernetes version + current_master_version (str): + [Output only] The current software version of the master + endpoint. + current_node_version (str): + [Output only] Deprecated, use + `NodePool.version `__ + instead. The current version of the node software + components. If they are currently at multiple versions + because they're in the process of being upgraded, this + reflects the minimum version of all nodes. + create_time (str): + [Output only] The time the cluster was created, in + `RFC3339 `__ text + format. + status (google.container_v1beta1.types.Cluster.Status): + [Output only] The current status of this cluster. + status_message (str): + [Output only] Deprecated. Use conditions instead. Additional + information about the current status of this cluster, if + available. + node_ipv4_cidr_size (int): + [Output only] The size of the address space on each node for + hosting containers. This is provisioned from within the + ``container_ipv4_cidr`` range. 
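The ``[Output only]`` fields above are never set by callers; they are populated on responses such as ``get_cluster``. A hedged sketch (project, location, and cluster IDs are placeholders):

import google.container_v1beta1 as container

client = container.ClusterManagerClient()
cluster = client.get_cluster(
    request={"name": "projects/my-project/locations/us-central1-a/clusters/my-cluster"}
)
# Output-only fields populated by the server:
print(cluster.endpoint)
print(container.Cluster.Status(cluster.status).name)  # e.g. "RUNNING"
print(cluster.create_time)                            # RFC3339 text format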
This field will only be set + when cluster is in route-based network mode. + services_ipv4_cidr (str): + [Output only] The IP address range of the Kubernetes + services in this cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). Service addresses are + typically put in the last ``/16`` from the container CIDR. + instance_group_urls (Sequence[str]): + Deprecated. Use node_pools.instance_group_urls. + current_node_count (int): + [Output only] The number of nodes currently in the cluster. + Deprecated. Call Kubernetes API directly to retrieve node + information. + expire_time (str): + [Output only] The time the cluster will be automatically + deleted in + `RFC3339 `__ text + format. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + enable_tpu (bool): + Enable the ability to use Cloud TPUs in this cluster. This + field is deprecated, use tpu_config.enabled instead. + tpu_ipv4_cidr_block (str): + [Output only] The IP address range of the Cloud TPUs in this + cluster, in + `CIDR `__ + notation (e.g. ``1.2.3.4/29``). + database_encryption (google.container_v1beta1.types.DatabaseEncryption): + Configuration of etcd encryption. + conditions (Sequence[google.container_v1beta1.types.StatusCondition]): + Which conditions caused the current cluster + state. + master (google.container_v1beta1.types.Master): + Configuration for master components. + """ + class Status(proto.Enum): + r"""The current status of the cluster.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RECONCILING = 3 + STOPPING = 4 + ERROR = 5 + DEGRADED = 6 + + name = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + initial_node_count = proto.Field( + proto.INT32, + number=3, + ) + node_config = proto.Field( + proto.MESSAGE, + number=4, + message='NodeConfig', + ) + master_auth = proto.Field( + proto.MESSAGE, + number=5, + message='MasterAuth', + ) + logging_service = proto.Field( + proto.STRING, + number=6, + ) + monitoring_service = proto.Field( + proto.STRING, + number=7, + ) + network = proto.Field( + proto.STRING, + number=8, + ) + cluster_ipv4_cidr = proto.Field( + proto.STRING, + number=9, + ) + addons_config = proto.Field( + proto.MESSAGE, + number=10, + message='AddonsConfig', + ) + subnetwork = proto.Field( + proto.STRING, + number=11, + ) + node_pools = proto.RepeatedField( + proto.MESSAGE, + number=12, + message='NodePool', + ) + locations = proto.RepeatedField( + proto.STRING, + number=13, + ) + enable_kubernetes_alpha = proto.Field( + proto.BOOL, + number=14, + ) + resource_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=15, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=16, + ) + legacy_abac = proto.Field( + proto.MESSAGE, + number=18, + message='LegacyAbac', + ) + network_policy = proto.Field( + proto.MESSAGE, + number=19, + message='NetworkPolicy', + ) + ip_allocation_policy = proto.Field( + proto.MESSAGE, + number=20, + message='IPAllocationPolicy', + ) + master_authorized_networks_config = proto.Field( + proto.MESSAGE, + number=22, + message='MasterAuthorizedNetworksConfig', + ) + maintenance_policy = proto.Field( + proto.MESSAGE, + number=23, + message='MaintenancePolicy', + ) + binary_authorization = proto.Field( + proto.MESSAGE, + number=24, + message='BinaryAuthorization', + ) + pod_security_policy_config = proto.Field( + proto.MESSAGE, + number=25, + message='PodSecurityPolicyConfig', + ) + autoscaling = 
proto.Field( + proto.MESSAGE, + number=26, + message='ClusterAutoscaling', + ) + network_config = proto.Field( + proto.MESSAGE, + number=27, + message='NetworkConfig', + ) + private_cluster = proto.Field( + proto.BOOL, + number=28, + ) + master_ipv4_cidr_block = proto.Field( + proto.STRING, + number=29, + ) + default_max_pods_constraint = proto.Field( + proto.MESSAGE, + number=30, + message='MaxPodsConstraint', + ) + resource_usage_export_config = proto.Field( + proto.MESSAGE, + number=33, + message='ResourceUsageExportConfig', + ) + authenticator_groups_config = proto.Field( + proto.MESSAGE, + number=34, + message='AuthenticatorGroupsConfig', + ) + private_cluster_config = proto.Field( + proto.MESSAGE, + number=37, + message='PrivateClusterConfig', + ) + vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, + number=39, + message='VerticalPodAutoscaling', + ) + shielded_nodes = proto.Field( + proto.MESSAGE, + number=40, + message='ShieldedNodes', + ) + release_channel = proto.Field( + proto.MESSAGE, + number=41, + message='ReleaseChannel', + ) + workload_identity_config = proto.Field( + proto.MESSAGE, + number=43, + message='WorkloadIdentityConfig', + ) + cluster_telemetry = proto.Field( + proto.MESSAGE, + number=46, + message='ClusterTelemetry', + ) + tpu_config = proto.Field( + proto.MESSAGE, + number=47, + message='TpuConfig', + ) + notification_config = proto.Field( + proto.MESSAGE, + number=49, + message='NotificationConfig', + ) + confidential_nodes = proto.Field( + proto.MESSAGE, + number=50, + message='ConfidentialNodes', + ) + self_link = proto.Field( + proto.STRING, + number=100, + ) + zone = proto.Field( + proto.STRING, + number=101, + ) + endpoint = proto.Field( + proto.STRING, + number=102, + ) + initial_cluster_version = proto.Field( + proto.STRING, + number=103, + ) + current_master_version = proto.Field( + proto.STRING, + number=104, + ) + current_node_version = proto.Field( + proto.STRING, + number=105, + ) + create_time = proto.Field( + proto.STRING, + number=106, + ) + status = proto.Field( + proto.ENUM, + number=107, + enum=Status, + ) + status_message = proto.Field( + proto.STRING, + number=108, + ) + node_ipv4_cidr_size = proto.Field( + proto.INT32, + number=109, + ) + services_ipv4_cidr = proto.Field( + proto.STRING, + number=110, + ) + instance_group_urls = proto.RepeatedField( + proto.STRING, + number=111, + ) + current_node_count = proto.Field( + proto.INT32, + number=112, + ) + expire_time = proto.Field( + proto.STRING, + number=113, + ) + location = proto.Field( + proto.STRING, + number=114, + ) + enable_tpu = proto.Field( + proto.BOOL, + number=115, + ) + tpu_ipv4_cidr_block = proto.Field( + proto.STRING, + number=116, + ) + database_encryption = proto.Field( + proto.MESSAGE, + number=38, + message='DatabaseEncryption', + ) + conditions = proto.RepeatedField( + proto.MESSAGE, + number=118, + message='StatusCondition', + ) + master = proto.Field( + proto.MESSAGE, + number=124, + message='Master', + ) + + +class ClusterUpdate(proto.Message): + r"""ClusterUpdate describes an update to the cluster. Exactly one + update can be applied to a cluster with each request, so at most + one field can be provided. + + Attributes: + desired_node_version (str): + The Kubernetes version to change the nodes to + (typically an upgrade). 
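Because at most one field of ``ClusterUpdate`` may be set per request, each ``update_cluster`` call describes a single change. A minimal sketch using the ``latest`` version alias described just below (placeholder resource name; assumes the staged import path):

import google.container_v1beta1 as container

client = container.ClusterManagerClient()
update = container.ClusterUpdate(desired_master_version="latest")
op = client.update_cluster(
    request={
        "name": "projects/my-project/locations/us-central1-a/clusters/my-cluster",
        "update": update,
    }
)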
+ + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + desired_monitoring_service (str): + The monitoring service the cluster should use to write + metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE 1.15). + - ``none`` - No metrics will be exported from the cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``monitoring.googleapis.com`` for + earlier versions. + desired_addons_config (google.container_v1beta1.types.AddonsConfig): + Configurations for the various addons + available to run in the cluster. + desired_node_pool_id (str): + The node pool to be upgraded. This field is mandatory if + "desired_node_version", "desired_image_family", + "desired_node_pool_autoscaling", or + "desired_workload_metadata_config" is specified and there is + more than one node pool on the cluster. + desired_image_type (str): + The desired image type for the node pool. NOTE: Set the + "desired_node_pool" field as well. + desired_node_pool_autoscaling (google.container_v1beta1.types.NodePoolAutoscaling): + Autoscaler configuration for the node pool specified in + desired_node_pool_id. If there is only one pool in the + cluster and desired_node_pool_id is not provided then the + change applies to that single node pool. + desired_locations (Sequence[str]): + The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. + + This list must always include the cluster's primary zone. + + Warning: changing cluster locations will update the + locations of all node pools and will result in nodes being + added and/or removed. + desired_master_authorized_networks_config (google.container_v1beta1.types.MasterAuthorizedNetworksConfig): + The desired configuration options for master + authorized networks feature. + desired_pod_security_policy_config (google.container_v1beta1.types.PodSecurityPolicyConfig): + The desired configuration options for the + PodSecurityPolicy feature. + desired_cluster_autoscaling (google.container_v1beta1.types.ClusterAutoscaling): + Cluster-level autoscaling configuration. + desired_binary_authorization (google.container_v1beta1.types.BinaryAuthorization): + The desired configuration options for the + Binary Authorization feature. + desired_logging_service (str): + The logging service the cluster should use to write logs. + Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud Logging + service with a Kubernetes-native resource model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. + + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be used + for GKE 1.14+ or ``logging.googleapis.com`` for earlier + versions. 
+ desired_resource_usage_export_config (google.container_v1beta1.types.ResourceUsageExportConfig): + The desired configuration for exporting + resource usage. + desired_vertical_pod_autoscaling (google.container_v1beta1.types.VerticalPodAutoscaling): + Cluster-level Vertical Pod Autoscaling + configuration. + desired_private_cluster_config (google.container_v1beta1.types.PrivateClusterConfig): + The desired private cluster configuration. + desired_intra_node_visibility_config (google.container_v1beta1.types.IntraNodeVisibilityConfig): + The desired config of Intra-node visibility. + desired_default_snat_status (google.container_v1beta1.types.DefaultSnatStatus): + The desired status of whether to disable + default sNAT for this cluster. + desired_cluster_telemetry (google.container_v1beta1.types.ClusterTelemetry): + The desired telemetry integration for the + cluster. + desired_release_channel (google.container_v1beta1.types.ReleaseChannel): + The desired release channel configuration. + desired_tpu_config (google.container_v1beta1.types.TpuConfig): + The desired Cloud TPU configuration. + desired_datapath_provider (google.container_v1beta1.types.DatapathProvider): + The desired datapath provider for the + cluster. + desired_notification_config (google.container_v1beta1.types.NotificationConfig): + The desired notification configuration. + desired_master_version (str): + The Kubernetes version to change the master + to. The only valid value is the latest supported + version. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + desired_database_encryption (google.container_v1beta1.types.DatabaseEncryption): + Configuration of etcd encryption. + desired_workload_identity_config (google.container_v1beta1.types.WorkloadIdentityConfig): + Configuration for Workload Identity. + desired_shielded_nodes (google.container_v1beta1.types.ShieldedNodes): + Configuration for Shielded Nodes. + desired_master (google.container_v1beta1.types.Master): + Configuration for master components. + desired_authenticator_groups_config (google.container_v1beta1.types.AuthenticatorGroupsConfig): + AuthenticatorGroupsConfig specifies the + config for the cluster security groups settings. 
+ """ + + desired_node_version = proto.Field( + proto.STRING, + number=4, + ) + desired_monitoring_service = proto.Field( + proto.STRING, + number=5, + ) + desired_addons_config = proto.Field( + proto.MESSAGE, + number=6, + message='AddonsConfig', + ) + desired_node_pool_id = proto.Field( + proto.STRING, + number=7, + ) + desired_image_type = proto.Field( + proto.STRING, + number=8, + ) + desired_node_pool_autoscaling = proto.Field( + proto.MESSAGE, + number=9, + message='NodePoolAutoscaling', + ) + desired_locations = proto.RepeatedField( + proto.STRING, + number=10, + ) + desired_master_authorized_networks_config = proto.Field( + proto.MESSAGE, + number=12, + message='MasterAuthorizedNetworksConfig', + ) + desired_pod_security_policy_config = proto.Field( + proto.MESSAGE, + number=14, + message='PodSecurityPolicyConfig', + ) + desired_cluster_autoscaling = proto.Field( + proto.MESSAGE, + number=15, + message='ClusterAutoscaling', + ) + desired_binary_authorization = proto.Field( + proto.MESSAGE, + number=16, + message='BinaryAuthorization', + ) + desired_logging_service = proto.Field( + proto.STRING, + number=19, + ) + desired_resource_usage_export_config = proto.Field( + proto.MESSAGE, + number=21, + message='ResourceUsageExportConfig', + ) + desired_vertical_pod_autoscaling = proto.Field( + proto.MESSAGE, + number=22, + message='VerticalPodAutoscaling', + ) + desired_private_cluster_config = proto.Field( + proto.MESSAGE, + number=25, + message='PrivateClusterConfig', + ) + desired_intra_node_visibility_config = proto.Field( + proto.MESSAGE, + number=26, + message='IntraNodeVisibilityConfig', + ) + desired_default_snat_status = proto.Field( + proto.MESSAGE, + number=28, + message='DefaultSnatStatus', + ) + desired_cluster_telemetry = proto.Field( + proto.MESSAGE, + number=30, + message='ClusterTelemetry', + ) + desired_release_channel = proto.Field( + proto.MESSAGE, + number=31, + message='ReleaseChannel', + ) + desired_tpu_config = proto.Field( + proto.MESSAGE, + number=38, + message='TpuConfig', + ) + desired_datapath_provider = proto.Field( + proto.ENUM, + number=50, + enum='DatapathProvider', + ) + desired_notification_config = proto.Field( + proto.MESSAGE, + number=55, + message='NotificationConfig', + ) + desired_master_version = proto.Field( + proto.STRING, + number=100, + ) + desired_database_encryption = proto.Field( + proto.MESSAGE, + number=46, + message='DatabaseEncryption', + ) + desired_workload_identity_config = proto.Field( + proto.MESSAGE, + number=47, + message='WorkloadIdentityConfig', + ) + desired_shielded_nodes = proto.Field( + proto.MESSAGE, + number=48, + message='ShieldedNodes', + ) + desired_master = proto.Field( + proto.MESSAGE, + number=52, + message='Master', + ) + desired_authenticator_groups_config = proto.Field( + proto.MESSAGE, + number=63, + message='AuthenticatorGroupsConfig', + ) + + +class Operation(proto.Message): + r"""This operation resource represents operations that may have + happened or are happening on the cluster. All fields are output + only. + + Attributes: + name (str): + The server-assigned ID for the operation. + zone (str): + The name of the Google Compute Engine + `zone `__ + in which the operation is taking place. This field is + deprecated, use location instead. + operation_type (google.container_v1beta1.types.Operation.Type): + The operation type. + status (google.container_v1beta1.types.Operation.Status): + The current status of the operation. + detail (str): + Detailed operation progress, if available. 
+ status_message (str): + Output only. If an error has occurred, a + textual description of the error. Deprecated. + Use field error instead. + self_link (str): + Server-defined URL for the resource. + target_link (str): + Server-defined URL for the target of the + operation. + location (str): + [Output only] The name of the Google Compute Engine + `zone `__ + or + `region `__ + in which the cluster resides. + start_time (str): + [Output only] The time the operation started, in + `RFC3339 `__ text + format. + end_time (str): + [Output only] The time the operation completed, in + `RFC3339 `__ text + format. + progress (google.container_v1beta1.types.OperationProgress): + Output only. [Output only] Progress information for an + operation. + cluster_conditions (Sequence[google.container_v1beta1.types.StatusCondition]): + Which conditions caused the current cluster + state. Deprecated. Use field error instead. + nodepool_conditions (Sequence[google.container_v1beta1.types.StatusCondition]): + Which conditions caused the current node pool + state. Deprecated. Use field error instead. + error (google.rpc.status_pb2.Status): + The error result of the operation in case of + failure. + """ + class Status(proto.Enum): + r"""Current status of the operation.""" + STATUS_UNSPECIFIED = 0 + PENDING = 1 + RUNNING = 2 + DONE = 3 + ABORTING = 4 + + class Type(proto.Enum): + r"""Operation type.""" + TYPE_UNSPECIFIED = 0 + CREATE_CLUSTER = 1 + DELETE_CLUSTER = 2 + UPGRADE_MASTER = 3 + UPGRADE_NODES = 4 + REPAIR_CLUSTER = 5 + UPDATE_CLUSTER = 6 + CREATE_NODE_POOL = 7 + DELETE_NODE_POOL = 8 + SET_NODE_POOL_MANAGEMENT = 9 + AUTO_REPAIR_NODES = 10 + AUTO_UPGRADE_NODES = 11 + SET_LABELS = 12 + SET_MASTER_AUTH = 13 + SET_NODE_POOL_SIZE = 14 + SET_NETWORK_POLICY = 15 + SET_MAINTENANCE_POLICY = 16 + + name = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + operation_type = proto.Field( + proto.ENUM, + number=3, + enum=Type, + ) + status = proto.Field( + proto.ENUM, + number=4, + enum=Status, + ) + detail = proto.Field( + proto.STRING, + number=8, + ) + status_message = proto.Field( + proto.STRING, + number=5, + ) + self_link = proto.Field( + proto.STRING, + number=6, + ) + target_link = proto.Field( + proto.STRING, + number=7, + ) + location = proto.Field( + proto.STRING, + number=9, + ) + start_time = proto.Field( + proto.STRING, + number=10, + ) + end_time = proto.Field( + proto.STRING, + number=11, + ) + progress = proto.Field( + proto.MESSAGE, + number=12, + message='OperationProgress', + ) + cluster_conditions = proto.RepeatedField( + proto.MESSAGE, + number=13, + message='StatusCondition', + ) + nodepool_conditions = proto.RepeatedField( + proto.MESSAGE, + number=14, + message='StatusCondition', + ) + error = proto.Field( + proto.MESSAGE, + number=15, + message=status_pb2.Status, + ) + + +class OperationProgress(proto.Message): + r"""Information about operation (or operation stage) progress. + Attributes: + name (str): + A non-parameterized string describing an + operation stage. Unset for single-stage + operations. + status (google.container_v1beta1.types.Operation.Status): + Status of an operation stage. + Unset for single-stage operations. 
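Methods on ``ClusterManagerClient`` return these ``Operation`` messages directly (they are not long-running-operation futures), so callers poll ``get_operation`` themselves. A hedged polling sketch:

import time
import google.container_v1beta1 as container

client = container.ClusterManagerClient()

def wait_for_operation(name: str, poll_seconds: float = 5.0) -> container.Operation:
    # `name` is a fully qualified operation resource name, e.g.
    # "projects/my-project/locations/us-central1-a/operations/operation-123".
    while True:
        op = client.get_operation(request={"name": name})
        if op.status == container.Operation.Status.DONE:
            return op  # op.error carries failure details, if any
        time.sleep(poll_seconds)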
+ metrics (Sequence[google.container_v1beta1.types.OperationProgress.Metric]): + Progress metric bundle, for example: metrics: [{name: "nodes + done", int_value: 15}, {name: "nodes total", int_value: 32}] + or metrics: [{name: "progress", double_value: 0.56}, {name: + "progress scale", double_value: 1.0}] + stages (Sequence[google.container_v1beta1.types.OperationProgress]): + Substages of an operation or a stage. + """ + + class Metric(proto.Message): + r"""Progress metric is (string, int|float|string) pair. + Attributes: + name (str): + Required. Metric name, e.g., "nodes total", + "percent done". + int_value (int): + For metrics with integer value. + double_value (float): + For metrics with floating point value. + string_value (str): + For metrics with custom values (ratios, + visual progress, etc.). + """ + + name = proto.Field( + proto.STRING, + number=1, + ) + int_value = proto.Field( + proto.INT64, + number=2, + oneof='value', + ) + double_value = proto.Field( + proto.DOUBLE, + number=3, + oneof='value', + ) + string_value = proto.Field( + proto.STRING, + number=4, + oneof='value', + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + status = proto.Field( + proto.ENUM, + number=2, + enum='Operation.Status', + ) + metrics = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Metric, + ) + stages = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='OperationProgress', + ) + + +class CreateClusterRequest(proto.Message): + r"""CreateClusterRequest creates a cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster (google.container_v1beta1.types.Cluster): + Required. A `cluster + resource `__ + parent (str): + The parent (project and location) where the cluster will be + created. Specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster = proto.Field( + proto.MESSAGE, + number=3, + message='Cluster', + ) + parent = proto.Field( + proto.STRING, + number=5, + ) + + +class GetClusterRequest(proto.Message): + r"""GetClusterRequest gets the settings of a cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to retrieve. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + retrieve. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class UpdateClusterRequest(proto.Message): + r"""UpdateClusterRequest updates the settings of a cluster. 
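``Metric`` stores its payload in a ``value`` oneof (see the ``oneof='value'`` declarations above), so exactly one of ``int_value``, ``double_value``, or ``string_value`` is set. One way to read it generically with proto-plus, a sketch using ``Message.pb()`` to reach the underlying protobuf:

import google.container_v1beta1 as container

def metric_value(metric: container.OperationProgress.Metric):
    raw = container.OperationProgress.Metric.pb(metric)
    # Returns "int_value", "double_value", "string_value", or None.
    which = raw.WhichOneof("value")
    return getattr(metric, which) if which else None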
+ Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + update (google.container_v1beta1.types.ClusterUpdate): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + update = proto.Field( + proto.MESSAGE, + number=4, + message='ClusterUpdate', + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class UpdateNodePoolRequest(proto.Message): + r"""SetNodePoolVersionRequest updates the version of a node pool. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to upgrade. This field has been deprecated + and replaced by the name field. + node_version (str): + Required. The Kubernetes version to change + the nodes to (typically an upgrade). + + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the Kubernetes + master version + image_type (str): + Required. The desired image type for the node + pool. + locations (Sequence[str]): + The desired list of Google Compute Engine + `zones `__ + in which the node pool's nodes should be located. Changing + the locations for a node pool will result in nodes being + either created or removed from the node pool, depending on + whether locations are being added or removed. + workload_metadata_config (google.container_v1beta1.types.WorkloadMetadataConfig): + The desired workload metadata config for the + node pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to update. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + upgrade_settings (google.container_v1beta1.types.NodePool.UpgradeSettings): + Upgrade settings control disruption and speed + of the upgrade. + linux_node_config (google.container_v1beta1.types.LinuxNodeConfig): + Parameters that can be configured on Linux + nodes. + kubelet_config (google.container_v1beta1.types.NodeKubeletConfig): + Node kubelet configs. 
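This message is the request for ``update_node_pool`` (the docstring retains the older ``SetNodePoolVersionRequest`` wording). A minimal sketch; the resource name and image type are placeholder values:

import google.container_v1beta1 as container

client = container.ClusterManagerClient()
op = client.update_node_pool(
    request={
        "name": "projects/my-project/locations/us-central1-a/clusters/my-cluster/nodePools/default-pool",
        "node_version": "latest",        # version alias documented above
        "image_type": "COS_CONTAINERD",  # placeholder image type
    }
)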
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + node_version = proto.Field( + proto.STRING, + number=5, + ) + image_type = proto.Field( + proto.STRING, + number=6, + ) + locations = proto.RepeatedField( + proto.STRING, + number=13, + ) + workload_metadata_config = proto.Field( + proto.MESSAGE, + number=14, + message='WorkloadMetadataConfig', + ) + name = proto.Field( + proto.STRING, + number=8, + ) + upgrade_settings = proto.Field( + proto.MESSAGE, + number=15, + message='NodePool.UpgradeSettings', + ) + linux_node_config = proto.Field( + proto.MESSAGE, + number=19, + message='LinuxNodeConfig', + ) + kubelet_config = proto.Field( + proto.MESSAGE, + number=20, + message='NodeKubeletConfig', + ) + + +class SetNodePoolAutoscalingRequest(proto.Message): + r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of + a node pool. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to upgrade. This field has been deprecated + and replaced by the name field. + autoscaling (google.container_v1beta1.types.NodePoolAutoscaling): + Required. Autoscaling configuration for the + node pool. + name (str): + The name (project, location, cluster, node pool) of the node + pool to set autoscaler settings. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + autoscaling = proto.Field( + proto.MESSAGE, + number=5, + message='NodePoolAutoscaling', + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetLoggingServiceRequest(proto.Message): + r"""SetLoggingServiceRequest sets the logging service of a + cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + logging_service (str): + Required. The logging service the cluster should use to + write logs. Currently available options: + + - ``logging.googleapis.com/kubernetes`` - The Cloud Logging + service with a Kubernetes-native resource model + - ``logging.googleapis.com`` - The legacy Cloud Logging + service (no longer available as of GKE 1.15). + - ``none`` - no logs will be exported from the cluster. 
+ + If left as an empty + string,\ ``logging.googleapis.com/kubernetes`` will be used + for GKE 1.14+ or ``logging.googleapis.com`` for earlier + versions. + name (str): + The name (project, location, cluster) of the cluster to set + logging. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + logging_service = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class SetMonitoringServiceRequest(proto.Message): + r"""SetMonitoringServiceRequest sets the monitoring service of a + cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + monitoring_service (str): + Required. The monitoring service the cluster should use to + write metrics. Currently available options: + + - "monitoring.googleapis.com/kubernetes" - The Cloud + Monitoring service with a Kubernetes-native resource + model + - ``monitoring.googleapis.com`` - The legacy Cloud + Monitoring service (no longer available as of GKE 1.15). + - ``none`` - No metrics will be exported from the cluster. + + If left as an empty + string,\ ``monitoring.googleapis.com/kubernetes`` will be + used for GKE 1.14+ or ``monitoring.googleapis.com`` for + earlier versions. + name (str): + The name (project, location, cluster) of the cluster to set + monitoring. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + monitoring_service = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetAddonsConfigRequest(proto.Message): + r"""SetAddonsRequest sets the addons associated with the cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + addons_config (google.container_v1beta1.types.AddonsConfig): + Required. The desired configurations for the + various addons available to run in the cluster. + name (str): + The name (project, location, cluster) of the cluster to set + addons. Specified in the format + ``projects/*/locations/*/clusters/*``. 
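These ``Set*Request`` messages map one-to-one onto client methods. For example, configuring an addon via ``set_addons_config`` (a sketch; assumes ``HttpLoadBalancing`` as defined elsewhere in this file, with placeholder resource names):

import google.container_v1beta1 as container

client = container.ClusterManagerClient()
addons = container.AddonsConfig(
    http_load_balancing=container.HttpLoadBalancing(disabled=False),
)
op = client.set_addons_config(
    request={
        "name": "projects/my-project/locations/us-central1-a/clusters/my-cluster",
        "addons_config": addons,
    }
)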
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + addons_config = proto.Field( + proto.MESSAGE, + number=4, + message='AddonsConfig', + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetLocationsRequest(proto.Message): + r"""SetLocationsRequest sets the locations of the cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + locations (Sequence[str]): + Required. The desired list of Google Compute Engine + `zones `__ + in which the cluster's nodes should be located. Changing the + locations a cluster is in will result in nodes being either + created or removed from the cluster, depending on whether + locations are being added or removed. + + This list must always include the cluster's primary zone. + name (str): + The name (project, location, cluster) of the cluster to set + locations. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + locations = proto.RepeatedField( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class UpdateMasterRequest(proto.Message): + r"""UpdateMasterRequest updates the master of the cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + master_version (str): + Required. The Kubernetes version to change + the master to. + Users may specify either explicit versions + offered by Kubernetes Engine or version aliases, + which have the following behavior: + - "latest": picks the highest valid Kubernetes + version - "1.X": picks the highest valid + patch+gke.N patch in the 1.X version - "1.X.Y": + picks the highest valid gke.N patch in the 1.X.Y + version - "1.X.Y-gke.N": picks an explicit + Kubernetes version - "-": picks the default + Kubernetes version + name (str): + The name (project, location, cluster) of the cluster to + update. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + master_version = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class SetMasterAuthRequest(proto.Message): + r"""SetMasterAuthRequest updates the admin password of a cluster. 
+ Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to upgrade. This field has been deprecated and + replaced by the name field. + action (google.container_v1beta1.types.SetMasterAuthRequest.Action): + Required. The exact form of action to be + taken on the master auth. + update (google.container_v1beta1.types.MasterAuth): + Required. A description of the update. + name (str): + The name (project, location, cluster) of the cluster to set + auth. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + class Action(proto.Enum): + r"""Operation type: what type update to perform.""" + UNKNOWN = 0 + SET_PASSWORD = 1 + GENERATE_PASSWORD = 2 + SET_USERNAME = 3 + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + action = proto.Field( + proto.ENUM, + number=4, + enum=Action, + ) + update = proto.Field( + proto.MESSAGE, + number=5, + message='MasterAuth', + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class DeleteClusterRequest(proto.Message): + r"""DeleteClusterRequest deletes a cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the cluster + to delete. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster) of the cluster to + delete. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=4, + ) + + +class ListClustersRequest(proto.Message): + r"""ListClustersRequest lists clusters. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides, or "-" for all zones. This + field has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the clusters will be + listed. Specified in the format ``projects/*/locations/*``. + Location "-" matches all zones and all regions. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + parent = proto.Field( + proto.STRING, + number=4, + ) + + +class ListClustersResponse(proto.Message): + r"""ListClustersResponse is the result of ListClustersRequest. 
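The ``-`` wildcard described above lets a single ``list_clusters`` call span every zone and region; the ``missing_zones`` field described below then reports any zones the response could not cover. A short sketch (placeholder project):

import google.container_v1beta1 as container

client = container.ClusterManagerClient()
response = client.list_clusters(
    request={"parent": "projects/my-project/locations/-"}  # "-" matches all locations
)
for cluster in response.clusters:
    print(cluster.name, cluster.location)
if response.missing_zones:
    print("Results may be incomplete for:", list(response.missing_zones))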
+ Attributes: + clusters (Sequence[google.container_v1beta1.types.Cluster]): + A list of clusters in the project in the + specified zone, or across all ones. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + clusters returned may be missing those zones. + """ + + clusters = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Cluster', + ) + missing_zones = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class GetOperationRequest(proto.Message): + r"""GetOperationRequest gets a single operation. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + operation_id (str): + Required. Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, operation id) of the operation + to get. Specified in the format + ``projects/*/locations/*/operations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + operation_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class ListOperationsRequest(proto.Message): + r"""ListOperationsRequest lists operations. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for, or ``-`` for all zones. This field + has been deprecated and replaced by the parent field. + parent (str): + The parent (project and location) where the operations will + be listed. Specified in the format + ``projects/*/locations/*``. Location "-" matches all zones + and all regions. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + parent = proto.Field( + proto.STRING, + number=4, + ) + + +class CancelOperationRequest(proto.Message): + r"""CancelOperationRequest cancels a single operation. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the operation resides. This field has been + deprecated and replaced by the name field. + operation_id (str): + Required. Deprecated. The server-assigned ``name`` of the + operation. This field has been deprecated and replaced by + the name field. + name (str): + The name (project, location, operation id) of the operation + to cancel. Specified in the format + ``projects/*/locations/*/operations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + operation_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=4, + ) + + +class ListOperationsResponse(proto.Message): + r"""ListOperationsResponse is the result of + ListOperationsRequest. 
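``list_operations`` and ``cancel_operation`` combine in the obvious way. A hedged sketch (placeholder project; building the operation name from ``op.location`` is an assumption about the deployment's resource naming):

import google.container_v1beta1 as container

client = container.ClusterManagerClient()
response = client.list_operations(
    request={"parent": "projects/my-project/locations/-"}
)
for op in response.operations:
    if op.status == container.Operation.Status.RUNNING:
        client.cancel_operation(
            request={
                "name": f"projects/my-project/locations/{op.location}/operations/{op.name}"
            }
        )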
+ + Attributes: + operations (Sequence[google.container_v1beta1.types.Operation]): + A list of operations in the project in the + specified zone. + missing_zones (Sequence[str]): + If any zones are listed here, the list of + operations returned may be missing the + operations from those zones. + """ + + operations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Operation', + ) + missing_zones = proto.RepeatedField( + proto.STRING, + number=2, + ) + + +class GetServerConfigRequest(proto.Message): + r"""Gets the current Kubernetes Engine service configuration. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + to return operations for. This field has been deprecated and + replaced by the name field. + name (str): + The name (project and location) of the server config to get, + specified in the format ``projects/*/locations/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + name = proto.Field( + proto.STRING, + number=4, + ) + + +class ServerConfig(proto.Message): + r"""Kubernetes Engine service configuration. + Attributes: + default_cluster_version (str): + Version of Kubernetes the service deploys by + default. + valid_node_versions (Sequence[str]): + List of valid node upgrade target versions, + in descending order. + default_image_type (str): + Default image type. + valid_image_types (Sequence[str]): + List of valid image types. + valid_master_versions (Sequence[str]): + List of valid master versions, in descending + order. + channels (Sequence[google.container_v1beta1.types.ServerConfig.ReleaseChannelConfig]): + List of release channel configurations. + """ + + class ReleaseChannelConfig(proto.Message): + r"""ReleaseChannelConfig exposes configuration for a release + channel. + + Attributes: + channel (google.container_v1beta1.types.ReleaseChannel.Channel): + The release channel this configuration + applies to. + default_version (str): + The default version for newly created + clusters on the channel. + available_versions (Sequence[google.container_v1beta1.types.ServerConfig.ReleaseChannelConfig.AvailableVersion]): + Deprecated. This field has been deprecated and replaced with + the valid_versions field. + valid_versions (Sequence[str]): + List of valid versions for the channel. + """ + + class AvailableVersion(proto.Message): + r"""Deprecated. + Attributes: + version (str): + Kubernetes version. + reason (str): + Reason for availability. 
+ """ + + version = proto.Field( + proto.STRING, + number=1, + ) + reason = proto.Field( + proto.STRING, + number=2, + ) + + channel = proto.Field( + proto.ENUM, + number=1, + enum='ReleaseChannel.Channel', + ) + default_version = proto.Field( + proto.STRING, + number=2, + ) + available_versions = proto.RepeatedField( + proto.MESSAGE, + number=3, + message='ServerConfig.ReleaseChannelConfig.AvailableVersion', + ) + valid_versions = proto.RepeatedField( + proto.STRING, + number=4, + ) + + default_cluster_version = proto.Field( + proto.STRING, + number=1, + ) + valid_node_versions = proto.RepeatedField( + proto.STRING, + number=3, + ) + default_image_type = proto.Field( + proto.STRING, + number=4, + ) + valid_image_types = proto.RepeatedField( + proto.STRING, + number=5, + ) + valid_master_versions = proto.RepeatedField( + proto.STRING, + number=6, + ) + channels = proto.RepeatedField( + proto.MESSAGE, + number=9, + message=ReleaseChannelConfig, + ) + + +class CreateNodePoolRequest(proto.Message): + r"""CreateNodePoolRequest creates a node pool for a cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the parent + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the parent field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the parent field. + node_pool (google.container_v1beta1.types.NodePool): + Required. The node pool to create. + parent (str): + The parent (project, location, cluster id) where the node + pool will be created. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool = proto.Field( + proto.MESSAGE, + number=4, + message='NodePool', + ) + parent = proto.Field( + proto.STRING, + number=6, + ) + + +class DeleteNodePoolRequest(proto.Message): + r"""DeleteNodePoolRequest deletes a node pool for a cluster. + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + node_pool_id (str): + Required. Deprecated. The name of the node + pool to delete. This field has been deprecated + and replaced by the name field. + name (str): + The name (project, location, cluster, node pool id) of the + node pool to delete. Specified in the format + ``projects/*/locations/*/clusters/*/nodePools/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + node_pool_id = proto.Field( + proto.STRING, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class ListNodePoolsRequest(proto.Message): + r"""ListNodePoolsRequest lists the node pool(s) for a cluster. 
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the parent
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the parent field.
+        cluster_id (str):
+            Required. Deprecated. The name of the
+            cluster. This field has been deprecated and
+            replaced by the parent field.
+        parent (str):
+            The parent (project, location, cluster id) where the node
+            pools will be listed. Specified in the format
+            ``projects/*/locations/*/clusters/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    parent = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+
+
+class GetNodePoolRequest(proto.Message):
+    r"""GetNodePoolRequest retrieves a node pool for a cluster.
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Required. Deprecated. The name of the
+            cluster. This field has been deprecated and
+            replaced by the name field.
+        node_pool_id (str):
+            Required. Deprecated. The name of the node
+            pool. This field has been deprecated and
+            replaced by the name field.
+        name (str):
+            The name (project, location, cluster, node pool id) of the
+            node pool to get. Specified in the format
+            ``projects/*/locations/*/clusters/*/nodePools/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    node_pool_id = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+
+
+class NodePool(proto.Message):
+    r"""NodePool contains the name and configuration for a cluster's
+    node pool. Node pools are a set of nodes (i.e. VMs), with a
+    common configuration and specification, under the control of the
+    cluster master. They may have a set of Kubernetes labels applied
+    to them, which may be used to reference them during pod
+    scheduling. They may also be resized up or down, to accommodate
+    the workload.
+
+    Attributes:
+        name (str):
+            The name of the node pool.
+        config (google.container_v1beta1.types.NodeConfig):
+            The node configuration of the pool.
+        initial_node_count (int):
+            The initial node count for the pool. You must ensure that
+            your Compute Engine `resource
+            quota `__ is
+            sufficient for this number of instances. You must also have
+            available firewall and routes quota.
+        locations (Sequence[str]):
+            The list of Google Compute Engine
+            `zones `__
+            in which the NodePool's nodes should be located.
+
+            If this value is unspecified during node pool creation, the
+            `Cluster.Locations `__
+            value will be used, instead.
+
+            Warning: changing node pool locations will result in nodes
+            being added and/or removed.
+        self_link (str):
+            [Output only] Server-defined URL for the resource.
+        version (str):
+            The version of Kubernetes running on this
+            node.
+ instance_group_urls (Sequence[str]): + [Output only] The resource URLs of the `managed instance + groups `__ + associated with this node pool. + status (google.container_v1beta1.types.NodePool.Status): + [Output only] The status of the nodes in this pool instance. + status_message (str): + [Output only] Deprecated. Use conditions instead. Additional + information about the current status of this node pool + instance, if available. + autoscaling (google.container_v1beta1.types.NodePoolAutoscaling): + Autoscaler configuration for this NodePool. + Autoscaler is enabled only if a valid + configuration is present. + management (google.container_v1beta1.types.NodeManagement): + NodeManagement configuration for this + NodePool. + max_pods_constraint (google.container_v1beta1.types.MaxPodsConstraint): + The constraint on the maximum number of pods + that can be run simultaneously on a node in the + node pool. + conditions (Sequence[google.container_v1beta1.types.StatusCondition]): + Which conditions caused the current node pool + state. + pod_ipv4_cidr_size (int): + [Output only] The pod CIDR block size per node in this node + pool. + upgrade_settings (google.container_v1beta1.types.NodePool.UpgradeSettings): + Upgrade settings control disruption and speed + of the upgrade. + """ + class Status(proto.Enum): + r"""The current status of the node pool instance.""" + STATUS_UNSPECIFIED = 0 + PROVISIONING = 1 + RUNNING = 2 + RUNNING_WITH_ERROR = 3 + RECONCILING = 4 + STOPPING = 5 + ERROR = 6 + + class UpgradeSettings(proto.Message): + r"""These upgrade settings control the level of parallelism and + the level of disruption caused by an upgrade. + + maxUnavailable controls the number of nodes that can be + simultaneously unavailable. + + maxSurge controls the number of additional nodes that can be + added to the node pool temporarily for the time of the upgrade + to increase the number of available nodes. + + (maxUnavailable + maxSurge) determines the level of parallelism + (how many nodes are being upgraded at the same time). + + Note: upgrades inevitably introduce some disruption since + workloads need to be moved from old nodes to new, upgraded ones. + Even if maxUnavailable=0, this holds true. (Disruption stays + within the limits of PodDisruptionBudget, if it is configured.) + + Consider a hypothetical node pool with 5 nodes having + maxSurge=2, maxUnavailable=1. This means the upgrade process + upgrades 3 nodes simultaneously. It creates 2 additional + (upgraded) nodes, then it brings down 3 old (not yet upgraded) + nodes at the same time. This ensures that there are always at + least 4 nodes available. + + Attributes: + max_surge (int): + The maximum number of nodes that can be + created beyond the current size of the node pool + during the upgrade process. + max_unavailable (int): + The maximum number of nodes that can be + simultaneously unavailable during the upgrade + process. A node is considered available if its + status is Ready. 
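+
+            As a sketch of the arithmetic described above, using the
+            hypothetical values from the 5-node example::
+
+                pool_size = 5
+                max_surge = 2
+                max_unavailable = 1
+                # nodes being upgraded at the same time
+                parallelism = max_surge + max_unavailable    # 3
+                # nodes guaranteed to stay available
+                min_available = pool_size - max_unavailable  # 4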
+ """ + + max_surge = proto.Field( + proto.INT32, + number=1, + ) + max_unavailable = proto.Field( + proto.INT32, + number=2, + ) + + name = proto.Field( + proto.STRING, + number=1, + ) + config = proto.Field( + proto.MESSAGE, + number=2, + message='NodeConfig', + ) + initial_node_count = proto.Field( + proto.INT32, + number=3, + ) + locations = proto.RepeatedField( + proto.STRING, + number=13, + ) + self_link = proto.Field( + proto.STRING, + number=100, + ) + version = proto.Field( + proto.STRING, + number=101, + ) + instance_group_urls = proto.RepeatedField( + proto.STRING, + number=102, + ) + status = proto.Field( + proto.ENUM, + number=103, + enum=Status, + ) + status_message = proto.Field( + proto.STRING, + number=104, + ) + autoscaling = proto.Field( + proto.MESSAGE, + number=4, + message='NodePoolAutoscaling', + ) + management = proto.Field( + proto.MESSAGE, + number=5, + message='NodeManagement', + ) + max_pods_constraint = proto.Field( + proto.MESSAGE, + number=6, + message='MaxPodsConstraint', + ) + conditions = proto.RepeatedField( + proto.MESSAGE, + number=105, + message='StatusCondition', + ) + pod_ipv4_cidr_size = proto.Field( + proto.INT32, + number=7, + ) + upgrade_settings = proto.Field( + proto.MESSAGE, + number=107, + message=UpgradeSettings, + ) + + +class NodeManagement(proto.Message): + r"""NodeManagement defines the set of node management services + turned on for the node pool. + + Attributes: + auto_upgrade (bool): + Whether the nodes will be automatically + upgraded. + auto_repair (bool): + Whether the nodes will be automatically + repaired. + upgrade_options (google.container_v1beta1.types.AutoUpgradeOptions): + Specifies the Auto Upgrade knobs for the node + pool. + """ + + auto_upgrade = proto.Field( + proto.BOOL, + number=1, + ) + auto_repair = proto.Field( + proto.BOOL, + number=2, + ) + upgrade_options = proto.Field( + proto.MESSAGE, + number=10, + message='AutoUpgradeOptions', + ) + + +class AutoUpgradeOptions(proto.Message): + r"""AutoUpgradeOptions defines the set of options for the user to + control how the Auto Upgrades will proceed. + + Attributes: + auto_upgrade_start_time (str): + [Output only] This field is set when upgrades are about to + commence with the approximate start time for the upgrades, + in `RFC3339 `__ text + format. + description (str): + [Output only] This field is set when upgrades are about to + commence with the description of the upgrade. + """ + + auto_upgrade_start_time = proto.Field( + proto.STRING, + number=1, + ) + description = proto.Field( + proto.STRING, + number=2, + ) + + +class MaintenancePolicy(proto.Message): + r"""MaintenancePolicy defines the maintenance policy to be used + for the cluster. + + Attributes: + window (google.container_v1beta1.types.MaintenanceWindow): + Specifies the maintenance window in which + maintenance may be performed. + resource_version (str): + A hash identifying the version of this policy, so that + updates to fields of the policy won't accidentally undo + intermediate changes (and so that users of the API unaware + of some fields won't accidentally remove other fields). Make + a ``get()`` request to the cluster to get the current + resource version and include it with requests to set the + policy. + """ + + window = proto.Field( + proto.MESSAGE, + number=1, + message='MaintenanceWindow', + ) + resource_version = proto.Field( + proto.STRING, + number=3, + ) + + +class MaintenanceWindow(proto.Message): + r"""MaintenanceWindow defines the maintenance window to be used + for the cluster. 
+
+    Attributes:
+        daily_maintenance_window (google.container_v1beta1.types.DailyMaintenanceWindow):
+            DailyMaintenanceWindow specifies a daily
+            maintenance operation window.
+        recurring_window (google.container_v1beta1.types.RecurringTimeWindow):
+            RecurringWindow specifies some number of
+            recurring time periods for maintenance to occur.
+            The time windows may be overlapping. If no
+            maintenance windows are set, maintenance can
+            occur at any time.
+        maintenance_exclusions (Sequence[google.container_v1beta1.types.MaintenanceWindow.MaintenanceExclusionsEntry]):
+            Exceptions to the maintenance window.
+            Non-emergency maintenance should not occur in
+            these windows.
+    """
+
+    daily_maintenance_window = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        oneof='policy',
+        message='DailyMaintenanceWindow',
+    )
+    recurring_window = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='policy',
+        message='RecurringTimeWindow',
+    )
+    maintenance_exclusions = proto.MapField(
+        proto.STRING,
+        proto.MESSAGE,
+        number=4,
+        message='TimeWindow',
+    )
+
+
+class TimeWindow(proto.Message):
+    r"""Represents an arbitrary window of time.
+    Attributes:
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            The time that the window first starts.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            The time that the window ends. The end time
+            should take place after the start time.
+    """
+
+    start_time = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=timestamp_pb2.Timestamp,
+    )
+    end_time = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=timestamp_pb2.Timestamp,
+    )
+
+
+class RecurringTimeWindow(proto.Message):
+    r"""Represents an arbitrary window of time that recurs.
+    Attributes:
+        window (google.container_v1beta1.types.TimeWindow):
+            The window of the first recurrence.
+        recurrence (str):
+            An RRULE
+            (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for
+            how this window recurs. The recurrences go on for the span
+            of time between the start and end time.
+
+            For example, to have something repeat every weekday, you'd
+            use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR``
+
+            To repeat some window daily (equivalent to the
+            DailyMaintenanceWindow): ``FREQ=DAILY``
+
+            For the first weekend of every month:
+            ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU``
+
+            This specifies how frequently the window starts. E.g., if
+            you wanted to have a 9-5 UTC-4 window every weekday, you'd
+            use something like:
+
+            ::
+
+                start time = 2019-01-01T09:00:00-0400
+                end time = 2019-01-01T17:00:00-0400
+                recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR
+
+            Windows can span multiple days. E.g., to make the window
+            encompass every weekend from midnight Saturday till the
+            last minute of Sunday UTC:
+
+            ::
+
+                start time = 2019-01-05T00:00:00Z
+                end time = 2019-01-07T23:59:00Z
+                recurrence = FREQ=WEEKLY;BYDAY=SA
+
+            Note that the start and end time's specific dates are
+            largely arbitrary except to specify duration of the window
+            and when it first starts. The FREQ values of HOURLY,
+            MINUTELY, and SECONDLY are not supported.
+    """
+
+    window = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='TimeWindow',
+    )
+    recurrence = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+class DailyMaintenanceWindow(proto.Message):
+    r"""Time window specified for daily maintenance operations.
+    Attributes:
+        start_time (str):
+            Time within the maintenance window to start the maintenance
+            operations. It must be in format "HH:MM", where HH : [00-23]
+            and MM : [00-59] GMT.
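+
+            For example, ``start_time = "03:00"`` (a hypothetical
+            value, shown only for illustration) requests that daily
+            maintenance begin at 03:00 GMT.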
+        duration (str):
+            [Output only] Duration of the time window, automatically
+            chosen to be smallest possible in the given scenario.
+    """
+
+    start_time = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    duration = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+
+
+class SetNodePoolManagementRequest(proto.Message):
+    r"""SetNodePoolManagementRequest sets the node management
+    properties of a node pool.
+
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Required. Deprecated. The name of the cluster
+            to update. This field has been deprecated and
+            replaced by the name field.
+        node_pool_id (str):
+            Required. Deprecated. The name of the node
+            pool to update. This field has been deprecated
+            and replaced by the name field.
+        management (google.container_v1beta1.types.NodeManagement):
+            Required. NodeManagement configuration for
+            the node pool.
+        name (str):
+            The name (project, location, cluster, node pool id) of the
+            node pool to set management properties. Specified in the
+            format ``projects/*/locations/*/clusters/*/nodePools/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    node_pool_id = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    management = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message='NodeManagement',
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+class SetNodePoolSizeRequest(proto.Message):
+    r"""SetNodePoolSizeRequest sets the size of a node pool.
+
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Required. Deprecated. The name of the cluster
+            to update. This field has been deprecated and
+            replaced by the name field.
+        node_pool_id (str):
+            Required. Deprecated. The name of the node
+            pool to update. This field has been deprecated
+            and replaced by the name field.
+        node_count (int):
+            Required. The desired node count for the
+            pool.
+        name (str):
+            The name (project, location, cluster, node pool id) of the
+            node pool to set size. Specified in the format
+            ``projects/*/locations/*/clusters/*/nodePools/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    node_pool_id = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    node_count = proto.Field(
+        proto.INT32,
+        number=5,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+class RollbackNodePoolUpgradeRequest(proto.Message):
+    r"""RollbackNodePoolUpgradeRequest rolls back the previously
+    Aborted or Failed NodePool upgrade. This will be a no-op if the
+    last upgrade successfully completed.
+
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Required. Deprecated. The name of the cluster
+            to rollback. This field has been deprecated and
+            replaced by the name field.
+        node_pool_id (str):
+            Required. Deprecated. The name of the node
+            pool to rollback. This field has been deprecated
+            and replaced by the name field.
+        name (str):
+            The name (project, location, cluster, node pool id) of the
+            node pool to rollback upgrade. Specified in the format
+            ``projects/*/locations/*/clusters/*/nodePools/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    node_pool_id = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+
+
+class ListNodePoolsResponse(proto.Message):
+    r"""ListNodePoolsResponse is the result of ListNodePoolsRequest.
+    Attributes:
+        node_pools (Sequence[google.container_v1beta1.types.NodePool]):
+            A list of node pools for a cluster.
+    """
+
+    node_pools = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='NodePool',
+    )
+
+
+class ClusterAutoscaling(proto.Message):
+    r"""ClusterAutoscaling contains global, per-cluster information
+    required by Cluster Autoscaler to automatically adjust the size
+    of the cluster and create/delete
+    node pools based on the current needs.
+
+    Attributes:
+        enable_node_autoprovisioning (bool):
+            Enables automatic node pool creation and
+            deletion.
+        resource_limits (Sequence[google.container_v1beta1.types.ResourceLimit]):
+            Contains global constraints regarding minimum
+            and maximum amount of resources in the cluster.
+        autoscaling_profile (google.container_v1beta1.types.ClusterAutoscaling.AutoscalingProfile):
+            Defines autoscaling behaviour.
+        autoprovisioning_node_pool_defaults (google.container_v1beta1.types.AutoprovisioningNodePoolDefaults):
+            AutoprovisioningNodePoolDefaults contains
+            defaults for a node pool created by NAP.
+        autoprovisioning_locations (Sequence[str]):
+            The list of Google Compute Engine
+            `zones `__
+            in which the NodePool's nodes can be created by NAP.
+    """
+    class AutoscalingProfile(proto.Enum):
+        r"""Defines possible options for autoscaling_profile field."""
+        PROFILE_UNSPECIFIED = 0
+        OPTIMIZE_UTILIZATION = 1
+        BALANCED = 2
+
+    enable_node_autoprovisioning = proto.Field(
+        proto.BOOL,
+        number=1,
+    )
+    resource_limits = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message='ResourceLimit',
+    )
+    autoscaling_profile = proto.Field(
+        proto.ENUM,
+        number=3,
+        enum=AutoscalingProfile,
+    )
+    autoprovisioning_node_pool_defaults = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message='AutoprovisioningNodePoolDefaults',
+    )
+    autoprovisioning_locations = proto.RepeatedField(
+        proto.STRING,
+        number=5,
+    )
+
+
+class AutoprovisioningNodePoolDefaults(proto.Message):
+    r"""AutoprovisioningNodePoolDefaults contains defaults for a node
+    pool created by NAP.
+
+    Attributes:
+        oauth_scopes (Sequence[str]):
+            The set of Google API scopes to be made available on all of
+            the node VMs under the "default" service account.
+ + The following scopes are recommended, but not required, and + by default are not included: + + - ``https://www.googleapis.com/auth/compute`` is required + for mounting persistent storage on your nodes. + - ``https://www.googleapis.com/auth/devstorage.read_only`` + is required for communicating with **gcr.io** (the + `Google Container + Registry `__). + + If unspecified, no scopes are added, unless Cloud Logging or + Cloud Monitoring are enabled, in which case their required + scopes will be added. + service_account (str): + The Google Cloud Platform Service Account to + be used by the node VMs. Specify the email + address of the Service Account; otherwise, if no + Service Account is specified, the "default" + service account is used. + upgrade_settings (google.container_v1beta1.types.NodePool.UpgradeSettings): + Upgrade settings control disruption and speed + of the upgrade. + management (google.container_v1beta1.types.NodeManagement): + NodeManagement configuration for this + NodePool. + min_cpu_platform (str): + Minimum CPU platform to be used by this instance. The + instance may be scheduled on the specified or newer CPU + platform. Applicable values are the friendly names of CPU + platforms, such as ``minCpuPlatform: "Intel Haswell"`` or + ``minCpuPlatform: "Intel Sandy Bridge"``. For more + information, read `how to specify min CPU + platform `__ + To unset the min cpu platform field pass "automatic" as + field value. + disk_size_gb (int): + Size of the disk attached to each node, + specified in GB. The smallest allowed disk size + is 10GB. + If unspecified, the default disk size is 100GB. + disk_type (str): + Type of the disk attached to each node (e.g. + 'pd-standard', 'pd-ssd' or 'pd-balanced') + + If unspecified, the default disk type is 'pd- + standard' + shielded_instance_config (google.container_v1beta1.types.ShieldedInstanceConfig): + Shielded Instance options. + boot_disk_kms_key (str): + The Customer Managed Encryption Key used to encrypt the boot + disk attached to each node in the node pool. This should be + of the form + projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. + For more information about protecting resources with Cloud + KMS Keys please see: + https://cloud.google.com/compute/docs/disks/customer-managed-encryption + image_type (str): + The image type to use for node created by + NodeAutoprovisioning. + """ + + oauth_scopes = proto.RepeatedField( + proto.STRING, + number=1, + ) + service_account = proto.Field( + proto.STRING, + number=2, + ) + upgrade_settings = proto.Field( + proto.MESSAGE, + number=3, + message='NodePool.UpgradeSettings', + ) + management = proto.Field( + proto.MESSAGE, + number=4, + message='NodeManagement', + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=5, + ) + disk_size_gb = proto.Field( + proto.INT32, + number=6, + ) + disk_type = proto.Field( + proto.STRING, + number=7, + ) + shielded_instance_config = proto.Field( + proto.MESSAGE, + number=8, + message='ShieldedInstanceConfig', + ) + boot_disk_kms_key = proto.Field( + proto.STRING, + number=9, + ) + image_type = proto.Field( + proto.STRING, + number=10, + ) + + +class ResourceLimit(proto.Message): + r"""Contains information about amount of some resource in the + cluster. For memory, value should be in GB. + + Attributes: + resource_type (str): + Resource name "cpu", "memory" or gpu-specific + string. + minimum (int): + Minimum amount of the resource in the + cluster. 
+        maximum (int):
+            Maximum amount of the resource in the
+            cluster.
+    """
+
+    resource_type = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    minimum = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    maximum = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+
+
+class NodePoolAutoscaling(proto.Message):
+    r"""NodePoolAutoscaling contains information required by cluster
+    autoscaler to adjust the size of the node pool to the current
+    cluster usage.
+
+    Attributes:
+        enabled (bool):
+            Is autoscaling enabled for this node pool.
+        min_node_count (int):
+            Minimum number of nodes in the NodePool. Must be >= 1 and <=
+            max_node_count.
+        max_node_count (int):
+            Maximum number of nodes in the NodePool. Must be >=
+            min_node_count. There has to be enough quota to scale up the
+            cluster.
+        autoprovisioned (bool):
+            Can this node pool be deleted automatically.
+    """
+
+    enabled = proto.Field(
+        proto.BOOL,
+        number=1,
+    )
+    min_node_count = proto.Field(
+        proto.INT32,
+        number=2,
+    )
+    max_node_count = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    autoprovisioned = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+
+
+class SetLabelsRequest(proto.Message):
+    r"""SetLabelsRequest sets the Google Cloud Platform labels on a
+    Google Container Engine cluster, which will in turn set them for
+    Google Compute Engine resources used by that cluster.
+
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Required. Deprecated. The name of the
+            cluster. This field has been deprecated and
+            replaced by the name field.
+        resource_labels (Sequence[google.container_v1beta1.types.SetLabelsRequest.ResourceLabelsEntry]):
+            Required. The labels to set for that cluster.
+        label_fingerprint (str):
+            Required. The fingerprint of the previous set of labels for
+            this resource, used to detect conflicts. The fingerprint is
+            initially generated by Kubernetes Engine and changes after
+            every request to modify or update labels. You must always
+            provide an up-to-date fingerprint hash when updating or
+            changing labels. Make a ``get()`` request to the resource to
+            get the latest fingerprint.
+        name (str):
+            The name (project, location, cluster id) of the cluster to
+            set labels. Specified in the format
+            ``projects/*/locations/*/clusters/*``.
+    """
+
+    project_id = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    cluster_id = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    resource_labels = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=4,
+    )
+    label_fingerprint = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+
+
+class SetLegacyAbacRequest(proto.Message):
+    r"""SetLegacyAbacRequest enables or disables the ABAC
+    authorization mechanism for a cluster.
+
+    Attributes:
+        project_id (str):
+            Required. Deprecated. The Google Developers Console `project
+            ID or project
+            number `__.
+            This field has been deprecated and replaced by the name
+            field.
+        zone (str):
+            Required. Deprecated. The name of the Google Compute Engine
+            `zone `__
+            in which the cluster resides. This field has been deprecated
+            and replaced by the name field.
+        cluster_id (str):
+            Required. Deprecated.
The name of the cluster + to update. This field has been deprecated and + replaced by the name field. + enabled (bool): + Required. Whether ABAC authorization will be + enabled in the cluster. + name (str): + The name (project, location, cluster id) of the cluster to + set legacy abac. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + enabled = proto.Field( + proto.BOOL, + number=4, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class StartIPRotationRequest(proto.Message): + r"""StartIPRotationRequest creates a new IP for the cluster and + then performs a node upgrade on each node pool to point to the + new IP. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster id) of the cluster to + start IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + rotate_credentials (bool): + Whether to rotate credentials during IP + rotation. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=6, + ) + rotate_credentials = proto.Field( + proto.BOOL, + number=7, + ) + + +class CompleteIPRotationRequest(proto.Message): + r"""CompleteIPRotationRequest moves the cluster master back into + single-IP mode. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + name (str): + The name (project, location, cluster id) of the cluster to + complete IP rotation. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + name = proto.Field( + proto.STRING, + number=7, + ) + + +class AcceleratorConfig(proto.Message): + r"""AcceleratorConfig represents a Hardware Accelerator request. + Attributes: + accelerator_count (int): + The number of the accelerator cards exposed + to an instance. + accelerator_type (str): + The accelerator type resource name. 
List of supported + accelerators + `here `__ + """ + + accelerator_count = proto.Field( + proto.INT64, + number=1, + ) + accelerator_type = proto.Field( + proto.STRING, + number=2, + ) + + +class WorkloadMetadataConfig(proto.Message): + r"""WorkloadMetadataConfig defines the metadata configuration to + expose to workloads on the node pool. + + Attributes: + node_metadata (google.container_v1beta1.types.WorkloadMetadataConfig.NodeMetadata): + NodeMetadata is the configuration for how to + expose metadata to the workloads running on the + node. + mode (google.container_v1beta1.types.WorkloadMetadataConfig.Mode): + Mode is the configuration for how to expose + metadata to workloads running on the node pool. + """ + class NodeMetadata(proto.Enum): + r"""NodeMetadata is the configuration for if and how to expose + the node metadata to the workload running on the node. + """ + UNSPECIFIED = 0 + SECURE = 1 + EXPOSE = 2 + GKE_METADATA_SERVER = 3 + + class Mode(proto.Enum): + r"""Mode is the configuration for how to expose metadata to + workloads running on the node. + """ + MODE_UNSPECIFIED = 0 + GCE_METADATA = 1 + GKE_METADATA = 2 + + node_metadata = proto.Field( + proto.ENUM, + number=1, + enum=NodeMetadata, + ) + mode = proto.Field( + proto.ENUM, + number=2, + enum=Mode, + ) + + +class SetNetworkPolicyRequest(proto.Message): + r"""SetNetworkPolicyRequest enables/disables network policy for a + cluster. + + Attributes: + project_id (str): + Required. Deprecated. The Google Developers Console `project + ID or project + number `__. + This field has been deprecated and replaced by the name + field. + zone (str): + Required. Deprecated. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. This field has been deprecated + and replaced by the name field. + cluster_id (str): + Required. Deprecated. The name of the + cluster. This field has been deprecated and + replaced by the name field. + network_policy (google.container_v1beta1.types.NetworkPolicy): + Required. Configuration options for the + NetworkPolicy feature. + name (str): + The name (project, location, cluster id) of the cluster to + set networking policy. Specified in the format + ``projects/*/locations/*/clusters/*``. + """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + network_policy = proto.Field( + proto.MESSAGE, + number=4, + message='NetworkPolicy', + ) + name = proto.Field( + proto.STRING, + number=6, + ) + + +class SetMaintenancePolicyRequest(proto.Message): + r"""SetMaintenancePolicyRequest sets the maintenance policy for a + cluster. + + Attributes: + project_id (str): + Required. The Google Developers Console `project ID or + project + number `__. + zone (str): + Required. The name of the Google Compute Engine + `zone `__ + in which the cluster resides. + cluster_id (str): + Required. The name of the cluster to update. + maintenance_policy (google.container_v1beta1.types.MaintenancePolicy): + Required. The maintenance policy to be set + for the cluster. An empty field clears the + existing maintenance policy. + name (str): + The name (project, location, cluster id) of the cluster to + set maintenance policy. Specified in the format + ``projects/*/locations/*/clusters/*``. 
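+
+            For example (hypothetical identifiers)::
+
+                name = "projects/my-project/locations/us-central1/clusters/my-cluster"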
+ """ + + project_id = proto.Field( + proto.STRING, + number=1, + ) + zone = proto.Field( + proto.STRING, + number=2, + ) + cluster_id = proto.Field( + proto.STRING, + number=3, + ) + maintenance_policy = proto.Field( + proto.MESSAGE, + number=4, + message='MaintenancePolicy', + ) + name = proto.Field( + proto.STRING, + number=5, + ) + + +class ListLocationsRequest(proto.Message): + r"""ListLocationsRequest is used to request the locations that + offer GKE. + + Attributes: + parent (str): + Required. Contains the name of the resource requested. + Specified in the format ``projects/*``. + """ + + parent = proto.Field( + proto.STRING, + number=1, + ) + + +class ListLocationsResponse(proto.Message): + r"""ListLocationsResponse returns the list of all GKE locations + and their recommendation state. + + Attributes: + locations (Sequence[google.container_v1beta1.types.Location]): + A full list of GKE locations. + next_page_token (str): + Only return ListLocationsResponse that occur after the + page_token. This value should be populated from the + ListLocationsResponse.next_page_token if that response token + was set (which happens when listing more Locations than fit + in a single ListLocationsResponse). + """ + + @property + def raw_page(self): + return self + + locations = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='Location', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class Location(proto.Message): + r"""Location returns the location name, and if the location is + recommended for GKE cluster scheduling. + + Attributes: + type_ (google.container_v1beta1.types.Location.LocationType): + Contains the type of location this Location + is for. Regional or Zonal. + name (str): + Contains the name of the resource requested. Specified in + the format ``projects/*/locations/*``. + recommended (bool): + Whether the location is recomended for GKE + cluster scheduling. + """ + class LocationType(proto.Enum): + r"""LocationType is the type of GKE location, regional or zonal.""" + LOCATION_TYPE_UNSPECIFIED = 0 + ZONE = 1 + REGION = 2 + + type_ = proto.Field( + proto.ENUM, + number=1, + enum=LocationType, + ) + name = proto.Field( + proto.STRING, + number=2, + ) + recommended = proto.Field( + proto.BOOL, + number=3, + ) + + +class StatusCondition(proto.Message): + r"""StatusCondition describes why a cluster or a node pool has a + certain status (e.g., ERROR or DEGRADED). + + Attributes: + code (google.container_v1beta1.types.StatusCondition.Code): + Machine-friendly representation of the condition Deprecated. + Use canonical_code instead. + message (str): + Human-friendly representation of the + condition + canonical_code (google.rpc.code_pb2.Code): + Canonical code of the condition. + """ + class Code(proto.Enum): + r"""Code for each condition""" + UNKNOWN = 0 + GCE_STOCKOUT = 1 + GKE_SERVICE_ACCOUNT_DELETED = 2 + GCE_QUOTA_EXCEEDED = 3 + SET_BY_OPERATOR = 4 + CLOUD_KMS_KEY_ERROR = 7 + + code = proto.Field( + proto.ENUM, + number=1, + enum=Code, + ) + message = proto.Field( + proto.STRING, + number=2, + ) + canonical_code = proto.Field( + proto.ENUM, + number=3, + enum=code_pb2.Code, + ) + + +class NetworkConfig(proto.Message): + r"""NetworkConfig reports the relative names of network & + subnetwork. + + Attributes: + network (str): + Output only. The relative name of the Google Compute Engine + [network]`google.container.v1beta1.NetworkConfig.network `__ + to which the cluster is connected. 
+            Example:
+            projects/my-project/global/networks/my-network
+        subnetwork (str):
+            Output only. The relative name of the Google Compute Engine
+            `subnetwork `__
+            to which the cluster is connected. Example:
+            projects/my-project/regions/us-central1/subnetworks/my-subnet
+        enable_intra_node_visibility (bool):
+            Whether Intra-node visibility is enabled for
+            this cluster. This makes same node pod to pod
+            traffic visible for VPC network.
+        default_snat_status (google.container_v1beta1.types.DefaultSnatStatus):
+            Whether the cluster disables default in-node sNAT rules.
+            In-node sNAT rules will be disabled when default_snat_status
+            is disabled. When disabled is set to false, default IP
+            masquerade rules will be applied to the nodes to prevent
+            sNAT on cluster internal traffic.
+        datapath_provider (google.container_v1beta1.types.DatapathProvider):
+            The desired datapath provider for this
+            cluster. By default, uses the IPTables-based
+            kube-proxy implementation.
+    """
+
+    network = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    subnetwork = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    enable_intra_node_visibility = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+    default_snat_status = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        message='DefaultSnatStatus',
+    )
+    datapath_provider = proto.Field(
+        proto.ENUM,
+        number=11,
+        enum='DatapathProvider',
+    )
+
+
+class ListUsableSubnetworksRequest(proto.Message):
+    r"""ListUsableSubnetworksRequest requests the list of usable
+    subnetworks available to a user for creating clusters.
+
+    Attributes:
+        parent (str):
+            Required. The parent project where subnetworks are usable.
+            Specified in the format ``projects/*``.
+        filter (str):
+            Filtering currently only supports equality on the
+            networkProjectId and must be in the form:
+            "networkProjectId=[PROJECTID]", where ``networkProjectId``
+            is the project which owns the listed subnetworks. This
+            defaults to the parent project ID.
+        page_size (int):
+            The max number of results per page that should be returned.
+            If the number of available results is larger than
+            ``page_size``, a ``next_page_token`` is returned which can
+            be used to get the next page of results in subsequent
+            requests. Acceptable values are 0 to 500, inclusive.
+            (Default: 500)
+        page_token (str):
+            Specifies a page token to use. Set this to
+            the nextPageToken returned by previous list
+            requests to get the next page of results.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    filter = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    page_size = proto.Field(
+        proto.INT32,
+        number=3,
+    )
+    page_token = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+
+
+class ListUsableSubnetworksResponse(proto.Message):
+    r"""ListUsableSubnetworksResponse is the response of
+    ListUsableSubnetworksRequest.
+
+    Attributes:
+        subnetworks (Sequence[google.container_v1beta1.types.UsableSubnetwork]):
+            A list of usable subnetworks in the specified
+            network project.
+        next_page_token (str):
+            This token allows you to get the next page of results for
+            list requests. If the number of results is larger than
+            ``page_size``, use the ``next_page_token`` as a value for
+            the query parameter ``page_token`` in the next request. The
+            value will become empty when there are no more pages.
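+
+            One way to page through results manually, as a sketch
+            (hypothetical identifiers, assuming an instantiated
+            ``ClusterManagerClient`` named ``client``)::
+
+                request = ListUsableSubnetworksRequest(parent="projects/my-project")
+                while True:
+                    response = client.list_usable_subnetworks(request)
+                    for subnetwork in response.subnetworks:
+                        ...  # inspect each UsableSubnetwork
+                    if not response.next_page_token:
+                        break
+                    request.page_token = response.next_page_token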
+ """ + + @property + def raw_page(self): + return self + + subnetworks = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='UsableSubnetwork', + ) + next_page_token = proto.Field( + proto.STRING, + number=2, + ) + + +class UsableSubnetworkSecondaryRange(proto.Message): + r"""Secondary IP range of a usable subnetwork. + Attributes: + range_name (str): + The name associated with this subnetwork + secondary range, used when adding an alias IP + range to a VM instance. + ip_cidr_range (str): + The range of IP addresses belonging to this + subnetwork secondary range. + status (google.container_v1beta1.types.UsableSubnetworkSecondaryRange.Status): + This field is to determine the status of the + secondary range programmably. + """ + class Status(proto.Enum): + r"""Status shows the current usage of a secondary IP range.""" + UNKNOWN = 0 + UNUSED = 1 + IN_USE_SERVICE = 2 + IN_USE_SHAREABLE_POD = 3 + IN_USE_MANAGED_POD = 4 + + range_name = proto.Field( + proto.STRING, + number=1, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=2, + ) + status = proto.Field( + proto.ENUM, + number=3, + enum=Status, + ) + + +class UsableSubnetwork(proto.Message): + r"""UsableSubnetwork resource returns the subnetwork name, its + associated network and the primary CIDR range. + + Attributes: + subnetwork (str): + Subnetwork Name. + Example: projects/my-project/regions/us- + central1/subnetworks/my-subnet + network (str): + Network Name. + Example: projects/my-project/global/networks/my- + network + ip_cidr_range (str): + The range of internal addresses that are + owned by this subnetwork. + secondary_ip_ranges (Sequence[google.container_v1beta1.types.UsableSubnetworkSecondaryRange]): + Secondary IP ranges. + status_message (str): + A human readable status message representing the reasons for + cases where the caller cannot use the secondary ranges under + the subnet. For example if the secondary_ip_ranges is empty + due to a permission issue, an insufficient permission + message will be given by status_message. + """ + + subnetwork = proto.Field( + proto.STRING, + number=1, + ) + network = proto.Field( + proto.STRING, + number=2, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=3, + ) + secondary_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=4, + message='UsableSubnetworkSecondaryRange', + ) + status_message = proto.Field( + proto.STRING, + number=5, + ) + + +class VerticalPodAutoscaling(proto.Message): + r"""VerticalPodAutoscaling contains global, per-cluster + information required by Vertical Pod Autoscaler to automatically + adjust the resources of pods controlled by it. + + Attributes: + enabled (bool): + Enables vertical pod autoscaling. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class DefaultSnatStatus(proto.Message): + r"""DefaultSnatStatus contains the desired state of whether + default sNAT should be disabled on the cluster. + + Attributes: + disabled (bool): + Disables cluster default sNAT rules. + """ + + disabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class IntraNodeVisibilityConfig(proto.Message): + r"""IntraNodeVisibilityConfig contains the desired config of the + intra-node visibility on this cluster. + + Attributes: + enabled (bool): + Enables intra node visibility for this + cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class MaxPodsConstraint(proto.Message): + r"""Constraints applied to pods. + Attributes: + max_pods_per_node (int): + Constraint enforced on the max num of pods + per node. 
+ """ + + max_pods_per_node = proto.Field( + proto.INT64, + number=1, + ) + + +class WorkloadIdentityConfig(proto.Message): + r"""Configuration for the use of Kubernetes Service Accounts in + GCP IAM policies. + + Attributes: + identity_namespace (str): + IAM Identity Namespace to attach all + Kubernetes Service Accounts to. + workload_pool (str): + The workload pool to attach all Kubernetes + service accounts to. + identity_provider (str): + identity provider is the third party identity + provider. + """ + + identity_namespace = proto.Field( + proto.STRING, + number=1, + ) + workload_pool = proto.Field( + proto.STRING, + number=2, + ) + identity_provider = proto.Field( + proto.STRING, + number=3, + ) + + +class DatabaseEncryption(proto.Message): + r"""Configuration of etcd encryption. + Attributes: + state (google.container_v1beta1.types.DatabaseEncryption.State): + Denotes the state of etcd encryption. + key_name (str): + Name of CloudKMS key to use for the + encryption of secrets in etcd. Ex. projects/my- + project/locations/global/keyRings/my- + ring/cryptoKeys/my-key + """ + class State(proto.Enum): + r"""State of etcd encryption.""" + UNKNOWN = 0 + ENCRYPTED = 1 + DECRYPTED = 2 + + state = proto.Field( + proto.ENUM, + number=2, + enum=State, + ) + key_name = proto.Field( + proto.STRING, + number=1, + ) + + +class ResourceUsageExportConfig(proto.Message): + r"""Configuration for exporting cluster resource usages. + Attributes: + bigquery_destination (google.container_v1beta1.types.ResourceUsageExportConfig.BigQueryDestination): + Configuration to use BigQuery as usage export + destination. + enable_network_egress_metering (bool): + Whether to enable network egress metering for + this cluster. If enabled, a daemonset will be + created in the cluster to meter network egress + traffic. + consumption_metering_config (google.container_v1beta1.types.ResourceUsageExportConfig.ConsumptionMeteringConfig): + Configuration to enable resource consumption + metering. + """ + + class BigQueryDestination(proto.Message): + r"""Parameters for using BigQuery as the destination of resource + usage export. + + Attributes: + dataset_id (str): + The ID of a BigQuery Dataset. + """ + + dataset_id = proto.Field( + proto.STRING, + number=1, + ) + + class ConsumptionMeteringConfig(proto.Message): + r"""Parameters for controlling consumption metering. + Attributes: + enabled (bool): + Whether to enable consumption metering for + this cluster. If enabled, a second BigQuery + table will be created to hold resource + consumption records. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + bigquery_destination = proto.Field( + proto.MESSAGE, + number=1, + message=BigQueryDestination, + ) + enable_network_egress_metering = proto.Field( + proto.BOOL, + number=2, + ) + consumption_metering_config = proto.Field( + proto.MESSAGE, + number=3, + message=ConsumptionMeteringConfig, + ) + + +class ShieldedNodes(proto.Message): + r"""Configuration of Shielded Nodes feature. + Attributes: + enabled (bool): + Whether Shielded Nodes features are enabled + on all nodes in this cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class GetOpenIDConfigRequest(proto.Message): + r"""GetOpenIDConfigRequest gets the OIDC discovery document for + the cluster. See the OpenID Connect Discovery 1.0 specification + for details. + + Attributes: + parent (str): + The cluster (project, location, cluster id) to get the + discovery document for. 
+            Specified in the format
+            ``projects/*/locations/*/clusters/*``.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class GetOpenIDConfigResponse(proto.Message):
+    r"""GetOpenIDConfigResponse is an OIDC discovery document for the
+    cluster. See the OpenID Connect Discovery 1.0 specification for
+    details.
+
+    Attributes:
+        issuer (str):
+            OIDC Issuer.
+        jwks_uri (str):
+            JSON Web Key URI.
+        response_types_supported (Sequence[str]):
+            Supported response types.
+        subject_types_supported (Sequence[str]):
+            Supported subject types.
+        id_token_signing_alg_values_supported (Sequence[str]):
+            Supported ID token signing algorithms.
+        claims_supported (Sequence[str]):
+            Supported claims.
+        grant_types (Sequence[str]):
+            Supported grant types.
+    """
+
+    issuer = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    jwks_uri = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    response_types_supported = proto.RepeatedField(
+        proto.STRING,
+        number=3,
+    )
+    subject_types_supported = proto.RepeatedField(
+        proto.STRING,
+        number=4,
+    )
+    id_token_signing_alg_values_supported = proto.RepeatedField(
+        proto.STRING,
+        number=5,
+    )
+    claims_supported = proto.RepeatedField(
+        proto.STRING,
+        number=6,
+    )
+    grant_types = proto.RepeatedField(
+        proto.STRING,
+        number=7,
+    )
+
+
+class GetJSONWebKeysRequest(proto.Message):
+    r"""GetJSONWebKeysRequest gets the public component of the keys used by
+    the cluster to sign token requests. This will be the jwks_uri for
+    the discovery document returned by getOpenIDConfig. See the OpenID
+    Connect Discovery 1.0 specification for details.
+
+    Attributes:
+        parent (str):
+            The cluster (project, location, cluster id) to get keys for.
+            Specified in the format
+            ``projects/*/locations/*/clusters/*``.
+    """
+
+    parent = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+
+
+class Jwk(proto.Message):
+    r"""Jwk is a JSON Web Key as specified in RFC 7517.
+    Attributes:
+        kty (str):
+            Key Type.
+        alg (str):
+            Algorithm.
+        use (str):
+            Permitted uses for the public keys.
+        kid (str):
+            Key ID.
+        n (str):
+            Used for RSA keys.
+        e (str):
+            Used for RSA keys.
+        x (str):
+            Used for ECDSA keys.
+        y (str):
+            Used for ECDSA keys.
+        crv (str):
+            Used for ECDSA keys.
+    """
+
+    kty = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    alg = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    use = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    kid = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    n = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    e = proto.Field(
+        proto.STRING,
+        number=6,
+    )
+    x = proto.Field(
+        proto.STRING,
+        number=7,
+    )
+    y = proto.Field(
+        proto.STRING,
+        number=8,
+    )
+    crv = proto.Field(
+        proto.STRING,
+        number=9,
+    )
+
+
+class GetJSONWebKeysResponse(proto.Message):
+    r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as
+    specified in RFC 7517.
+
+    Attributes:
+        keys (Sequence[google.container_v1beta1.types.Jwk]):
+            The public component of the keys used by the
+            cluster to sign token requests.
+    """
+
+    keys = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='Jwk',
+    )
+
+
+class ReleaseChannel(proto.Message):
+    r"""ReleaseChannel indicates which release channel a cluster is
+    subscribed to. Release channels are arranged in order of risk.
+    When a cluster is subscribed to a release channel, Google
+    maintains both the master version and the node version. Node
+    auto-upgrade defaults to true and cannot be disabled.
+ + Attributes: + channel (google.container_v1beta1.types.ReleaseChannel.Channel): + channel specifies which release channel the + cluster is subscribed to. + """ + class Channel(proto.Enum): + r"""Possible values for 'channel'.""" + UNSPECIFIED = 0 + RAPID = 1 + REGULAR = 2 + STABLE = 3 + + channel = proto.Field( + proto.ENUM, + number=1, + enum=Channel, + ) + + +class TpuConfig(proto.Message): + r"""Configuration for Cloud TPU. + Attributes: + enabled (bool): + Whether Cloud TPU integration is enabled or + not. + use_service_networking (bool): + Whether to use service networking for Cloud + TPU or not. + ipv4_cidr_block (str): + IPv4 CIDR block reserved for Cloud TPU in the + VPC. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + use_service_networking = proto.Field( + proto.BOOL, + number=2, + ) + ipv4_cidr_block = proto.Field( + proto.STRING, + number=3, + ) + + +class Master(proto.Message): + r"""Master is the configuration for components on master. """ + + +class NotificationConfig(proto.Message): + r"""NotificationConfig is the configuration of notifications. + Attributes: + pubsub (google.container_v1beta1.types.NotificationConfig.PubSub): + Notification config for Pub/Sub. + """ + + class PubSub(proto.Message): + r"""Pub/Sub specific notification config. + Attributes: + enabled (bool): + Enable notifications for Pub/Sub. + topic (str): + The desired Pub/Sub topic to which notifications will be + sent by GKE. Format is + ``projects/{project}/topics/{topic}``. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + topic = proto.Field( + proto.STRING, + number=2, + ) + + pubsub = proto.Field( + proto.MESSAGE, + number=1, + message=PubSub, + ) + + +class ConfidentialNodes(proto.Message): + r"""ConfidentialNodes is configuration for the confidential nodes + feature, which makes nodes run on confidential VMs. + + Attributes: + enabled (bool): + Whether Confidential Nodes feature is enabled + for all nodes in this cluster. + """ + + enabled = proto.Field( + proto.BOOL, + number=1, + ) + + +class UpgradeEvent(proto.Message): + r"""UpgradeEvent is a notification sent to customers by the + cluster server when a resource is upgrading. + + Attributes: + resource_type (google.container_v1beta1.types.UpgradeResourceType): + Required. The resource type that is + upgrading. + operation (str): + Required. The operation associated with this + upgrade. + operation_start_time (google.protobuf.timestamp_pb2.Timestamp): + Required. The time when the operation was + started. + current_version (str): + Required. The current version before the + upgrade. + target_version (str): + Required. The target version for the upgrade. + resource (str): + Optional. Optional relative path to the + resource. For example in node pool upgrades, the + relative path of the node pool. 
+ """ + + resource_type = proto.Field( + proto.ENUM, + number=1, + enum='UpgradeResourceType', + ) + operation = proto.Field( + proto.STRING, + number=2, + ) + operation_start_time = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + current_version = proto.Field( + proto.STRING, + number=4, + ) + target_version = proto.Field( + proto.STRING, + number=5, + ) + resource = proto.Field( + proto.STRING, + number=6, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini new file mode 100644 index 00000000..4505b485 --- /dev/null +++ b/owl-bot-staging/v1beta1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py new file mode 100644 index 00000000..b289931c --- /dev/null +++ b/owl-bot-staging/v1beta1/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/container_v1beta1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.7') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.6') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py new file mode 100644 index 00000000..fcd6cd36 --- /dev/null +++ b/owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py @@ -0,0 +1,208 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class containerCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'cancel_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'complete_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', ), + 'create_cluster': ('project_id', 'zone', 'cluster', 'parent', ), + 'create_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool', 'parent', ), + 'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), + 'get_json_web_keys': ('parent', ), + 'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'get_operation': ('project_id', 'zone', 'operation_id', 'name', ), + 'get_server_config': ('project_id', 'zone', 'name', ), + 'list_clusters': ('project_id', 'zone', 'parent', ), + 'list_locations': ('parent', ), + 'list_node_pools': ('project_id', 'zone', 'cluster_id', 'parent', ), + 'list_operations': ('project_id', 'zone', 'parent', ), + 'list_usable_subnetworks': ('parent', 'filter', 'page_size', 'page_token', ), + 'rollback_node_pool_upgrade': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), + 'set_addons_config': ('project_id', 'zone', 'cluster_id', 'addons_config', 'name', ), + 'set_labels': ('project_id', 'zone', 'cluster_id', 'resource_labels', 'label_fingerprint', 'name', ), + 'set_legacy_abac': ('project_id', 'zone', 'cluster_id', 'enabled', 'name', ), + 'set_locations': ('project_id', 'zone', 'cluster_id', 'locations', 'name', ), + 'set_logging_service': ('project_id', 'zone', 'cluster_id', 'logging_service', 'name', ), + 'set_maintenance_policy': ('project_id', 'zone', 'cluster_id', 'maintenance_policy', 'name', ), + 'set_master_auth': ('project_id', 'zone', 'cluster_id', 'action', 'update', 'name', ), + 'set_monitoring_service': ('project_id', 'zone', 'cluster_id', 'monitoring_service', 'name', ), + 'set_network_policy': ('project_id', 'zone', 'cluster_id', 'network_policy', 'name', ), + 'set_node_pool_autoscaling': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'autoscaling', 'name', ), + 'set_node_pool_management': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'management', 'name', ), + 'set_node_pool_size': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_count', 'name', ), + 'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ), + 'update_cluster': ('project_id', 'zone', 'cluster_id', 'update', 'name', ), + 'update_master': ('project_id', 'zone', 'cluster_id', 'master_version', 'name', ), + 'update_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_version', 'image_type', 'locations', 'workload_metadata_config', 'name', 'upgrade_settings', 'linux_node_config', 'kubelet_config', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + 
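+            # kword_params is the ordered tuple of flattened parameter names
+            # for this method; positional args are matched against it below.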
+        except (AttributeError, KeyError):
+            # Either not a method from the API or too convoluted to be sure.
+            return updated
+
+        # If the existing code is valid, keyword args come after positional args.
+        # Therefore, all positional args must map to the first parameters.
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=containerCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+        updated_path = out_dir.joinpath(fpath.relative_to(in_dir))
+        updated_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Generate the updated source file at the corresponding path.
+        with open(updated_path, 'w') as f:
+            f.write(updated.code)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description="""Fix up source that uses the container client library.
+
+The existing sources are NOT overwritten but are copied to output_dir with changes made.
+
+Note: This tool makes a best-effort attempt to convert positional
+      parameters in client method calls to keyword-based parameters.
+      Cases where it WILL FAIL include
+      A) * or ** expansion in a method call.
+      B) Calls via function or method alias (includes free function calls)
+      C) Indirect or dispatched calls (e.g. the method is looked up dynamically)
+
+      These all constitute false negatives. The tool will also detect false
+      positives when an API method shares a name with another method.
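+
+      An illustrative invocation (the paths here are placeholders):
+
+          python3 fixup_container_v1beta1_keywords.py \
+              --input-directory ./src --output-directory ./src_fixed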
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py new file mode 100644 index 00000000..eb059ee8 --- /dev/null +++ b/owl-bot-staging/v1beta1/setup.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-container', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google',), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 1.27.0, < 3.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.15.0', + 'packaging >= 14.3', ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py new file mode 100644 index 00000000..b54a5fcc --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py new file mode 100644 index 00000000..55a4b87b --- /dev/null +++ b/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py @@ -0,0 +1,9846 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock +import packaging.version + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.container_v1beta1.services.cluster_manager import ClusterManagerAsyncClient +from google.container_v1beta1.services.cluster_manager import ClusterManagerClient +from google.container_v1beta1.services.cluster_manager import pagers +from google.container_v1beta1.services.cluster_manager import transports +from google.container_v1beta1.services.cluster_manager.transports.base import _GOOGLE_AUTH_VERSION +from google.container_v1beta1.types import cluster_service +from google.oauth2 import service_account +from google.protobuf import timestamp_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore +from google.rpc import code_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore +import google.auth + + +# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively +# through google-api-core: +# - Delete the auth "less than" test cases +# - Delete these pytest markers (Make the "greater than or equal to" tests the default). +requires_google_auth_lt_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), + reason="This test requires google-auth < 1.25.0", +) +requires_google_auth_gte_1_25_0 = pytest.mark.skipif( + packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), + reason="This test requires google-auth >= 1.25.0", +) + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
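+# For example, a client whose DEFAULT_ENDPOINT is "localhost:7469" (the port is
+# illustrative) is patched to "foo.googleapis.com", so the derived mTLS endpoint
+# is distinguishable from the plain one.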
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ClusterManagerClient._get_default_mtls_endpoint(None) is None + assert ClusterManagerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ClusterManagerClient, + ClusterManagerAsyncClient, +]) +def test_cluster_manager_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'container.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ClusterManagerGrpcTransport, "grpc"), + (transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_manager_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ClusterManagerClient, + ClusterManagerAsyncClient, +]) +def test_cluster_manager_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'container.googleapis.com:443' + + +def test_cluster_manager_client_get_transport_class(): + transport = ClusterManagerClient.get_transport_class() + available_transports = [ + transports.ClusterManagerGrpcTransport, + ] + assert transport in available_transports + + transport = ClusterManagerClient.get_transport_class("grpc") + assert transport == transports.ClusterManagerGrpcTransport + + 
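+# The client_options tests below exercise endpoint selection end to end: an
+# explicit api_endpoint, the GOOGLE_API_USE_MTLS_ENDPOINT and
+# GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables, custom scopes, and a
+# credentials file, asserting in each case that the transport is constructed
+# with the expected host and client certificate source.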
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) +@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) +def test_cluster_manager_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "false"), +]) +@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) +@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_cluster_manager_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class() + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_manager_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), + (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), +]) +def test_cluster_manager_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_cluster_manager_client_client_options_from_dict(): + with mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = ClusterManagerClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_list_clusters(transport: str = 'grpc', request_type=cluster_service.ListClustersRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse( + missing_zones=['missing_zones_value'], + ) + response = client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListClustersResponse) + assert response.missing_zones == ['missing_zones_value'] + + +def test_list_clusters_from_dict(): + test_list_clusters(request_type=dict) + + +def test_list_clusters_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + client.list_clusters() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListClustersRequest() + + +@pytest.mark.asyncio +async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListClustersRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse( + missing_zones=['missing_zones_value'], + )) + response = await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.ListClustersRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.ListClustersResponse) + assert response.missing_zones == ['missing_zones_value'] + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value = cluster_service.ListClustersResponse() + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.ListClustersRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse()) + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_clusters_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + project_id='project_id_value', + zone='zone_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + + +def test_list_clusters_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + cluster_service.ListClustersRequest(), + project_id='project_id_value', + zone='zone_value', + ) + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListClustersResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_clusters( + project_id='project_id_value', + zone='zone_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + + +@pytest.mark.asyncio +async def test_list_clusters_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_clusters( + cluster_service.ListClustersRequest(), + project_id='project_id_value', + zone='zone_value', + ) + + +def test_get_cluster(transport: str = 'grpc', request_type=cluster_service.GetClusterRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Cluster( + name='name_value', + description='description_value', + initial_node_count=1911, + logging_service='logging_service_value', + monitoring_service='monitoring_service_value', + network='network_value', + cluster_ipv4_cidr='cluster_ipv4_cidr_value', + subnetwork='subnetwork_value', + locations=['locations_value'], + enable_kubernetes_alpha=True, + label_fingerprint='label_fingerprint_value', + private_cluster=True, + master_ipv4_cidr_block='master_ipv4_cidr_block_value', + self_link='self_link_value', + zone='zone_value', + endpoint='endpoint_value', + initial_cluster_version='initial_cluster_version_value', + current_master_version='current_master_version_value', + current_node_version='current_node_version_value', + create_time='create_time_value', + status=cluster_service.Cluster.Status.PROVISIONING, + status_message='status_message_value', + node_ipv4_cidr_size=1955, + services_ipv4_cidr='services_ipv4_cidr_value', + instance_group_urls=['instance_group_urls_value'], + current_node_count=1936, + expire_time='expire_time_value', + location='location_value', + enable_tpu=True, + tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value', + ) + response = client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Cluster) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.initial_node_count == 1911 + assert response.logging_service == 'logging_service_value' + assert response.monitoring_service == 'monitoring_service_value' + assert response.network == 'network_value' + assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' + assert response.subnetwork == 'subnetwork_value' + assert response.locations == ['locations_value'] + assert response.enable_kubernetes_alpha is True + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.private_cluster is True + assert response.master_ipv4_cidr_block == 'master_ipv4_cidr_block_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + assert response.endpoint == 'endpoint_value' + assert response.initial_cluster_version == 'initial_cluster_version_value' + assert response.current_master_version == 'current_master_version_value' + assert response.current_node_version == 'current_node_version_value' + assert response.create_time == 'create_time_value' + assert response.status == cluster_service.Cluster.Status.PROVISIONING + assert response.status_message == 'status_message_value' + assert response.node_ipv4_cidr_size == 1955 + assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' + assert response.instance_group_urls == ['instance_group_urls_value'] + assert response.current_node_count == 1936 + assert response.expire_time == 'expire_time_value' + assert response.location == 'location_value' + assert response.enable_tpu is True + assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' + + +def test_get_cluster_from_dict(): + test_get_cluster(request_type=dict) + + +def test_get_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+ client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + client.get_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetClusterRequest() + + +@pytest.mark.asyncio +async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetClusterRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster( + name='name_value', + description='description_value', + initial_node_count=1911, + logging_service='logging_service_value', + monitoring_service='monitoring_service_value', + network='network_value', + cluster_ipv4_cidr='cluster_ipv4_cidr_value', + subnetwork='subnetwork_value', + locations=['locations_value'], + enable_kubernetes_alpha=True, + label_fingerprint='label_fingerprint_value', + private_cluster=True, + master_ipv4_cidr_block='master_ipv4_cidr_block_value', + self_link='self_link_value', + zone='zone_value', + endpoint='endpoint_value', + initial_cluster_version='initial_cluster_version_value', + current_master_version='current_master_version_value', + current_node_version='current_node_version_value', + create_time='create_time_value', + status=cluster_service.Cluster.Status.PROVISIONING, + status_message='status_message_value', + node_ipv4_cidr_size=1955, + services_ipv4_cidr='services_ipv4_cidr_value', + instance_group_urls=['instance_group_urls_value'], + current_node_count=1936, + expire_time='expire_time_value', + location='location_value', + enable_tpu=True, + tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value', + )) + response = await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetClusterRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, cluster_service.Cluster) + assert response.name == 'name_value' + assert response.description == 'description_value' + assert response.initial_node_count == 1911 + assert response.logging_service == 'logging_service_value' + assert response.monitoring_service == 'monitoring_service_value' + assert response.network == 'network_value' + assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' + assert response.subnetwork == 'subnetwork_value' + assert response.locations == ['locations_value'] + assert response.enable_kubernetes_alpha is True + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.private_cluster is True + assert response.master_ipv4_cidr_block == 'master_ipv4_cidr_block_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + assert response.endpoint == 'endpoint_value' + assert response.initial_cluster_version == 'initial_cluster_version_value' + assert response.current_master_version == 'current_master_version_value' + assert response.current_node_version == 'current_node_version_value' + assert response.create_time == 'create_time_value' + assert response.status == cluster_service.Cluster.Status.PROVISIONING + assert response.status_message == 'status_message_value' + assert response.node_ipv4_cidr_size == 1955 + assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' + assert response.instance_group_urls == ['instance_group_urls_value'] + assert response.current_node_count == 1936 + assert response.expire_time == 'expire_time_value' + assert response.location == 'location_value' + assert response.enable_tpu is True + assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' + + +@pytest.mark.asyncio +async def test_get_cluster_async_from_dict(): + await test_get_cluster_async(request_type=dict) + + +def test_get_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + call.return_value = cluster_service.Cluster() + client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster()) + await client.get_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_cluster_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Cluster() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_cluster( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + + +def test_get_cluster_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + cluster_service.GetClusterRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + ) + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Cluster() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_cluster( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + + +@pytest.mark.asyncio +async def test_get_cluster_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_cluster( + cluster_service.GetClusterRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + ) + + +def test_create_cluster(transport: str = 'grpc', request_type=cluster_service.CreateClusterRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. 
+ request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_create_cluster_from_dict(): + test_create_cluster(request_type=dict) + + +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateClusterRequest() + + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateClusterRequest): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + )) + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.CreateClusterRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_create_cluster_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.create_cluster(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster=cluster_service.Cluster(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster == cluster_service.Cluster(name='name_value')
+
+
+def test_create_cluster_flattened_error():
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_cluster(
+ cluster_service.CreateClusterRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster=cluster_service.Cluster(name='name_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.create_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_cluster(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster=cluster_service.Cluster(name='name_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster == cluster_service.Cluster(name='name_value')
+
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_error_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_cluster(
+ cluster_service.CreateClusterRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster=cluster_service.Cluster(name='name_value'),
+ )
+
+
+def test_update_cluster(transport: str = 'grpc', request_type=cluster_service.UpdateClusterRequest):
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
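+ # The field values below are arbitrary sentinels; the assertions after
+ # the call only verify that they round-trip through the mocked
+ # transport unchanged.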
+ call.return_value = cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ )
+ response = client.update_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.UpdateClusterRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation)
+ assert response.name == 'name_value'
+ assert response.zone == 'zone_value'
+ assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+ assert response.status == cluster_service.Operation.Status.PENDING
+ assert response.detail == 'detail_value'
+ assert response.status_message == 'status_message_value'
+ assert response.self_link == 'self_link_value'
+ assert response.target_link == 'target_link_value'
+ assert response.location == 'location_value'
+ assert response.start_time == 'start_time_value'
+ assert response.end_time == 'end_time_value'
+
+
+def test_update_cluster_from_dict():
+ test_update_cluster(request_type=dict)
+
+
+def test_update_cluster_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_cluster),
+ '__call__') as call:
+ client.update_cluster()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.UpdateClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateClusterRequest):
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ ))
+ response = await client.update_cluster(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.UpdateClusterRequest()
+
+ # Establish that the response is the type that we expect.
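+ # Note: the sentinel operation_type stays CREATE_CLUSTER even though this
+ # exercises update_cluster; the values are arbitrary round-trip checks,
+ # not semantically meaningful fields.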
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateClusterRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_update_cluster_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.update_cluster( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
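+ # The client is expected to marshal the flattened keyword arguments into
+ # a single UpdateClusterRequest before invoking the transport.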
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value')
+
+
+def test_update_cluster_flattened_error():
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.update_cluster(
+ cluster_service.UpdateClusterRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
+ )
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_cluster),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.update_cluster(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value')
+
+
+@pytest.mark.asyncio
+async def test_update_cluster_flattened_error_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.update_cluster(
+ cluster_service.UpdateClusterRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
+ )
+
+
+def test_update_node_pool(transport: str = 'grpc', request_type=cluster_service.UpdateNodePoolRequest):
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_node_pool),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ )
+ response = client.update_node_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.UpdateNodePoolRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation)
+ assert response.name == 'name_value'
+ assert response.zone == 'zone_value'
+ assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+ assert response.status == cluster_service.Operation.Status.PENDING
+ assert response.detail == 'detail_value'
+ assert response.status_message == 'status_message_value'
+ assert response.self_link == 'self_link_value'
+ assert response.target_link == 'target_link_value'
+ assert response.location == 'location_value'
+ assert response.start_time == 'start_time_value'
+ assert response.end_time == 'end_time_value'
+
+
+def test_update_node_pool_from_dict():
+ test_update_node_pool(request_type=dict)
+
+
+def test_update_node_pool_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_node_pool),
+ '__call__') as call:
+ client.update_node_pool()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.UpdateNodePoolRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateNodePoolRequest):
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.update_node_pool),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ ))
+ response = await client.update_node_pool(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.UpdateNodePoolRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_update_node_pool_async_from_dict(): + await test_update_node_pool_async(request_type=dict) + + +def test_update_node_pool_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_node_pool), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_node_pool_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.UpdateNodePoolRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_node_pool), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.update_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_node_pool_autoscaling(transport: str = 'grpc', request_type=cluster_service.SetNodePoolAutoscalingRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_autoscaling), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ )
+ response = client.set_node_pool_autoscaling(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation)
+ assert response.name == 'name_value'
+ assert response.zone == 'zone_value'
+ assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+ assert response.status == cluster_service.Operation.Status.PENDING
+ assert response.detail == 'detail_value'
+ assert response.status_message == 'status_message_value'
+ assert response.self_link == 'self_link_value'
+ assert response.target_link == 'target_link_value'
+ assert response.location == 'location_value'
+ assert response.start_time == 'start_time_value'
+ assert response.end_time == 'end_time_value'
+
+
+def test_set_node_pool_autoscaling_from_dict():
+ test_set_node_pool_autoscaling(request_type=dict)
+
+
+def test_set_node_pool_autoscaling_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_node_pool_autoscaling),
+ '__call__') as call:
+ client.set_node_pool_autoscaling()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_autoscaling_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolAutoscalingRequest):
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_node_pool_autoscaling),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ ))
+ response = await client.set_node_pool_autoscaling(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_async_from_dict(): + await test_set_node_pool_autoscaling_async(request_type=dict) + + +def test_set_node_pool_autoscaling_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolAutoscalingRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_autoscaling), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_node_pool_autoscaling_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolAutoscalingRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_autoscaling), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_node_pool_autoscaling(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_logging_service(transport: str = 'grpc', request_type=cluster_service.SetLoggingServiceRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
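+ # Patching __call__ on the type of the bound stub method intercepts the
+ # gRPC invocation itself, so no channel or network I/O is exercised.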
+ with mock.patch.object(
+ type(client.transport.set_logging_service),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ )
+ response = client.set_logging_service(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetLoggingServiceRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation)
+ assert response.name == 'name_value'
+ assert response.zone == 'zone_value'
+ assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+ assert response.status == cluster_service.Operation.Status.PENDING
+ assert response.detail == 'detail_value'
+ assert response.status_message == 'status_message_value'
+ assert response.self_link == 'self_link_value'
+ assert response.target_link == 'target_link_value'
+ assert response.location == 'location_value'
+ assert response.start_time == 'start_time_value'
+ assert response.end_time == 'end_time_value'
+
+
+def test_set_logging_service_from_dict():
+ test_set_logging_service(request_type=dict)
+
+
+def test_set_logging_service_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_logging_service),
+ '__call__') as call:
+ client.set_logging_service()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetLoggingServiceRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_logging_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLoggingServiceRequest):
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_logging_service),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ ))
+ response = await client.set_logging_service(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLoggingServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_logging_service_async_from_dict(): + await test_set_logging_service_async(request_type=dict) + + +def test_set_logging_service_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_logging_service), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_logging_service_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetLoggingServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_logging_service), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_logging_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_logging_service_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_logging_service), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_logging_service(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ logging_service='logging_service_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].logging_service == 'logging_service_value'
+
+
+def test_set_logging_service_flattened_error():
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.set_logging_service(
+ cluster_service.SetLoggingServiceRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ logging_service='logging_service_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_set_logging_service_flattened_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_logging_service),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.set_logging_service(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ logging_service='logging_service_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].logging_service == 'logging_service_value'
+
+
+@pytest.mark.asyncio
+async def test_set_logging_service_flattened_error_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.set_logging_service(
+ cluster_service.SetLoggingServiceRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ logging_service='logging_service_value',
+ )
+
+
+def test_set_monitoring_service(transport: str = 'grpc', request_type=cluster_service.SetMonitoringServiceRequest):
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_monitoring_service),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ )
+ response = client.set_monitoring_service(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetMonitoringServiceRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation)
+ assert response.name == 'name_value'
+ assert response.zone == 'zone_value'
+ assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+ assert response.status == cluster_service.Operation.Status.PENDING
+ assert response.detail == 'detail_value'
+ assert response.status_message == 'status_message_value'
+ assert response.self_link == 'self_link_value'
+ assert response.target_link == 'target_link_value'
+ assert response.location == 'location_value'
+ assert response.start_time == 'start_time_value'
+ assert response.end_time == 'end_time_value'
+
+
+def test_set_monitoring_service_from_dict():
+ test_set_monitoring_service(request_type=dict)
+
+
+def test_set_monitoring_service_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_monitoring_service),
+ '__call__') as call:
+ client.set_monitoring_service()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetMonitoringServiceRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_monitoring_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMonitoringServiceRequest):
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_monitoring_service),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ ))
+ response = await client.set_monitoring_service(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMonitoringServiceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_monitoring_service_async_from_dict(): + await test_set_monitoring_service_async(request_type=dict) + + +def test_set_monitoring_service_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_monitoring_service_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMonitoringServiceRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_monitoring_service(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_monitoring_service_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_monitoring_service), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.set_monitoring_service(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ monitoring_service='monitoring_service_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].monitoring_service == 'monitoring_service_value'
+
+
+def test_set_monitoring_service_flattened_error():
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.set_monitoring_service(
+ cluster_service.SetMonitoringServiceRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ monitoring_service='monitoring_service_value',
+ )
+
+
+@pytest.mark.asyncio
+async def test_set_monitoring_service_flattened_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_monitoring_service),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.set_monitoring_service(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ monitoring_service='monitoring_service_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].monitoring_service == 'monitoring_service_value'
+
+
+@pytest.mark.asyncio
+async def test_set_monitoring_service_flattened_error_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.set_monitoring_service(
+ cluster_service.SetMonitoringServiceRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ monitoring_service='monitoring_service_value',
+ )
+
+
+def test_set_addons_config(transport: str = 'grpc', request_type=cluster_service.SetAddonsConfigRequest):
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_addons_config),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ )
+ response = client.set_addons_config(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetAddonsConfigRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation)
+ assert response.name == 'name_value'
+ assert response.zone == 'zone_value'
+ assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+ assert response.status == cluster_service.Operation.Status.PENDING
+ assert response.detail == 'detail_value'
+ assert response.status_message == 'status_message_value'
+ assert response.self_link == 'self_link_value'
+ assert response.target_link == 'target_link_value'
+ assert response.location == 'location_value'
+ assert response.start_time == 'start_time_value'
+ assert response.end_time == 'end_time_value'
+
+
+def test_set_addons_config_from_dict():
+ test_set_addons_config(request_type=dict)
+
+
+def test_set_addons_config_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_addons_config),
+ '__call__') as call:
+ client.set_addons_config()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetAddonsConfigRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_addons_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetAddonsConfigRequest):
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_addons_config),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+ name='name_value',
+ zone='zone_value',
+ operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+ status=cluster_service.Operation.Status.PENDING,
+ detail='detail_value',
+ status_message='status_message_value',
+ self_link='self_link_value',
+ target_link='target_link_value',
+ location='location_value',
+ start_time='start_time_value',
+ end_time='end_time_value',
+ ))
+ response = await client.set_addons_config(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == cluster_service.SetAddonsConfigRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +@pytest.mark.asyncio +async def test_set_addons_config_async_from_dict(): + await test_set_addons_config_async(request_type=dict) + + +def test_set_addons_config_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + call.return_value = cluster_service.Operation() + client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_addons_config_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetAddonsConfigRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_addons_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_addons_config_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_addons_config), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_addons_config( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
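+ # proto-plus messages define structural equality, so the AddonsConfig
+ # comparison below checks the nested message field by field.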
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True))
+
+
+def test_set_addons_config_flattened_error():
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.set_addons_config(
+ cluster_service.SetAddonsConfigRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
+ )
+
+
+@pytest.mark.asyncio
+async def test_set_addons_config_flattened_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_addons_config),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.set_addons_config(
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].project_id == 'project_id_value'
+ assert args[0].zone == 'zone_value'
+ assert args[0].cluster_id == 'cluster_id_value'
+ assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True))
+
+
+@pytest.mark.asyncio
+async def test_set_addons_config_flattened_error_async():
+ client = ClusterManagerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.set_addons_config(
+ cluster_service.SetAddonsConfigRequest(),
+ project_id='project_id_value',
+ zone='zone_value',
+ cluster_id='cluster_id_value',
+ addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
+ )
+
+
+def test_set_locations(transport: str = 'grpc', request_type=cluster_service.SetLocationsRequest):
+ client = ClusterManagerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_locations),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.set_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLocationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_set_locations_from_dict():
+    test_set_locations(request_type=dict)
+
+
+def test_set_locations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_locations),
+            '__call__') as call:
+        client.set_locations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLocationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_locations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLocationsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_locations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLocationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_locations_async_from_dict():
+    await test_set_locations_async(request_type=dict)
+
+
+def test_set_locations_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLocationsRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_locations),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_locations_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLocationsRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_locations),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_set_locations_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_locations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_locations(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            locations=['locations_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].locations == ['locations_value']
+
+
+def test_set_locations_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_locations(
+            cluster_service.SetLocationsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            locations=['locations_value'],
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_locations_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_locations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_locations(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            locations=['locations_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].locations == ['locations_value']
+
+
+@pytest.mark.asyncio
+async def test_set_locations_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_locations(
+            cluster_service.SetLocationsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            locations=['locations_value'],
+        )
+
+
+def test_update_master(transport: str = 'grpc', request_type=cluster_service.UpdateMasterRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.update_master(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateMasterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_update_master_from_dict():
+    test_update_master(request_type=dict)
+
+
+def test_update_master_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        client.update_master()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateMasterRequest()
+
+
+@pytest.mark.asyncio
+async def test_update_master_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateMasterRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.update_master(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.UpdateMasterRequest()
+
+    # Establish that the response is the type that we expect.
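+    # (proto-plus message fields surface as plain Python values, which is why
+    # direct equality checks against str and enum literals work below.)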
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_update_master_async_from_dict():
+    await test_update_master_async(request_type=dict)
+
+
+def test_update_master_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.UpdateMasterRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.update_master(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_master_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.UpdateMasterRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.update_master(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_update_master_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_master(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            master_version='master_version_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].master_version == 'master_version_value'
+
+
+def test_update_master_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_master(
+            cluster_service.UpdateMasterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            master_version='master_version_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_update_master_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_master),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_master(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            master_version='master_version_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].master_version == 'master_version_value'
+
+
+@pytest.mark.asyncio
+async def test_update_master_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_master(
+            cluster_service.UpdateMasterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            master_version='master_version_value',
+        )
+
+
+def test_set_master_auth(transport: str = 'grpc', request_type=cluster_service.SetMasterAuthRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_master_auth),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.set_master_auth(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetMasterAuthRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_set_master_auth_from_dict():
+    test_set_master_auth(request_type=dict)
+
+
+def test_set_master_auth_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_master_auth),
+            '__call__') as call:
+        client.set_master_auth()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetMasterAuthRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_master_auth_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMasterAuthRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_master_auth),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_master_auth(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetMasterAuthRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_master_auth_async_from_dict():
+    await test_set_master_auth_async(request_type=dict)
+
+
+def test_set_master_auth_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetMasterAuthRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_master_auth),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_master_auth(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_master_auth_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetMasterAuthRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_master_auth),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_master_auth(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_cluster(transport: str = 'grpc', request_type=cluster_service.DeleteClusterRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.DeleteClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_delete_cluster_from_dict():
+    test_delete_cluster(request_type=dict)
+
+
+def test_delete_cluster_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        client.delete_cluster()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.DeleteClusterRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteClusterRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.DeleteClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_async_from_dict():
+    await test_delete_cluster_async(request_type=dict)
+
+
+def test_delete_cluster_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.DeleteClusterRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.DeleteClusterRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_cluster_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+
+
+def test_delete_cluster_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_cluster(
+            cluster_service.DeleteClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_cluster(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_cluster(
+            cluster_service.DeleteClusterRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+
+def test_list_operations(transport: str = 'grpc', request_type=cluster_service.ListOperationsRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListOperationsResponse(
+            missing_zones=['missing_zones_value'],
+        )
+        response = client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListOperationsResponse)
+    assert response.missing_zones == ['missing_zones_value']
+
+
+def test_list_operations_from_dict():
+    test_list_operations(request_type=dict)
+
+
+def test_list_operations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        client.list_operations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListOperationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListOperationsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse(
+            missing_zones=['missing_zones_value'],
+        ))
+        response = await client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListOperationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListOperationsResponse)
+    assert response.missing_zones == ['missing_zones_value']
+
+
+@pytest.mark.asyncio
+async def test_list_operations_async_from_dict():
+    await test_list_operations_async(request_type=dict)
+
+
+def test_list_operations_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListOperationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        call.return_value = cluster_service.ListOperationsResponse()
+        client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_operations_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListOperationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
+        await client.list_operations(request)
+
+        # Establish that the underlying gRPC stub method was called.
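+        # (FakeUnaryUnaryCall is awaitable; once the call above has been
+        # awaited, the mocked stub has recorded the invocation inspected below.)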
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_operations_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListOperationsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_operations(
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+
+
+def test_list_operations_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_operations(
+            cluster_service.ListOperationsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_operations_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_operations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_operations(
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+
+
+@pytest.mark.asyncio
+async def test_list_operations_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_operations(
+            cluster_service.ListOperationsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+
+def test_get_operation(transport: str = 'grpc', request_type=cluster_service.GetOperationRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetOperationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_get_operation_from_dict():
+    test_get_operation(request_type=dict)
+
+
+def test_get_operation_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        client.get_operation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetOperationRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetOperationRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetOperationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_get_operation_async_from_dict():
+    await test_get_operation_async(request_type=dict)
+
+
+def test_get_operation_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetOperationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_operation_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetOperationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.get_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_operation_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].operation_id == 'operation_id_value'
+
+
+def test_get_operation_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_operation(
+            cluster_service.GetOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_operation_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].operation_id == 'operation_id_value'
+
+
+@pytest.mark.asyncio
+async def test_get_operation_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_operation(
+            cluster_service.GetOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+
+def test_cancel_operation(transport: str = 'grpc', request_type=cluster_service.CancelOperationRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.cancel_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CancelOperationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_cancel_operation_from_dict():
+    test_cancel_operation(request_type=dict)
+
+
+def test_cancel_operation_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
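+    # (Even with no arguments, the client is expected to construct and send a
+    # default CancelOperationRequest.)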
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        client.cancel_operation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CancelOperationRequest()
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CancelOperationRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.cancel_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CancelOperationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_async_from_dict():
+    await test_cancel_operation_async(request_type=dict)
+
+
+def test_cancel_operation_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CancelOperationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        call.return_value = None
+        client.cancel_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CancelOperationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.cancel_operation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
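+    # (The gapic layer encodes routing fields into a single
+    # 'x-goog-request-params' metadata entry, e.g. 'name=name/value'.)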
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_cancel_operation_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.cancel_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].operation_id == 'operation_id_value'
+
+
+def test_cancel_operation_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.cancel_operation(
+            cluster_service.CancelOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.cancel_operation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.cancel_operation(
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].operation_id == 'operation_id_value'
+
+
+@pytest.mark.asyncio
+async def test_cancel_operation_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.cancel_operation(
+            cluster_service.CancelOperationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            operation_id='operation_id_value',
+        )
+
+
+def test_get_server_config(transport: str = 'grpc', request_type=cluster_service.GetServerConfigRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ServerConfig(
+            default_cluster_version='default_cluster_version_value',
+            valid_node_versions=['valid_node_versions_value'],
+            default_image_type='default_image_type_value',
+            valid_image_types=['valid_image_types_value'],
+            valid_master_versions=['valid_master_versions_value'],
+        )
+        response = client.get_server_config(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetServerConfigRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ServerConfig)
+    assert response.default_cluster_version == 'default_cluster_version_value'
+    assert response.valid_node_versions == ['valid_node_versions_value']
+    assert response.default_image_type == 'default_image_type_value'
+    assert response.valid_image_types == ['valid_image_types_value']
+    assert response.valid_master_versions == ['valid_master_versions_value']
+
+
+def test_get_server_config_from_dict():
+    test_get_server_config(request_type=dict)
+
+
+def test_get_server_config_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        client.get_server_config()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetServerConfigRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetServerConfigRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig(
+            default_cluster_version='default_cluster_version_value',
+            valid_node_versions=['valid_node_versions_value'],
+            default_image_type='default_image_type_value',
+            valid_image_types=['valid_image_types_value'],
+            valid_master_versions=['valid_master_versions_value'],
+        ))
+        response = await client.get_server_config(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetServerConfigRequest()
+
+    # Establish that the response is the type that we expect.
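+    # Awaiting the FakeUnaryUnaryCall above resolves to the wrapped message,
+    # so the async client surfaces a plain ServerConfig here.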
+ assert isinstance(response, cluster_service.ServerConfig) + assert response.default_cluster_version == 'default_cluster_version_value' + assert response.valid_node_versions == ['valid_node_versions_value'] + assert response.default_image_type == 'default_image_type_value' + assert response.valid_image_types == ['valid_image_types_value'] + assert response.valid_master_versions == ['valid_master_versions_value'] + + +@pytest.mark.asyncio +async def test_get_server_config_async_from_dict(): + await test_get_server_config_async(request_type=dict) + + +def test_get_server_config_field_headers(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetServerConfigRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_server_config), + '__call__') as call: + call.return_value = cluster_service.ServerConfig() + client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_server_config_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetServerConfigRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_server_config), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig()) + await client.get_server_config(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_get_server_config_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_server_config), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ServerConfig() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_server_config( + project_id='project_id_value', + zone='zone_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
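+        # The flattened keyword arguments above are merged into a single
+        # GetServerConfigRequest before the transport is invoked.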
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+
+
+def test_get_server_config_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_server_config(
+            cluster_service.GetServerConfigRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_server_config),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_server_config(
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+
+
+@pytest.mark.asyncio
+async def test_get_server_config_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_server_config(
+            cluster_service.GetServerConfigRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+        )
+
+
+def test_list_node_pools(transport: str = 'grpc', request_type=cluster_service.ListNodePoolsRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListNodePoolsResponse()
+        response = client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListNodePoolsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListNodePoolsResponse)
+
+
+def test_list_node_pools_from_dict():
+    test_list_node_pools(request_type=dict)
+
+
+def test_list_node_pools_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        client.list_node_pools()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListNodePoolsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListNodePoolsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
+        response = await client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListNodePoolsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListNodePoolsResponse)
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_async_from_dict():
+    await test_list_node_pools_async(request_type=dict)
+
+
+def test_list_node_pools_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListNodePoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        call.return_value = cluster_service.ListNodePoolsResponse()
+        client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListNodePoolsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
+        await client.list_node_pools(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_node_pools_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListNodePoolsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_node_pools(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+
+
+def test_list_node_pools_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_node_pools(
+            cluster_service.ListNodePoolsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_node_pools),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_node_pools(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+
+
+@pytest.mark.asyncio
+async def test_list_node_pools_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_node_pools(
+            cluster_service.ListNodePoolsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+
+def test_get_json_web_keys(transport: str = 'grpc', request_type=cluster_service.GetJSONWebKeysRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.GetJSONWebKeysResponse()
+        response = client.get_json_web_keys(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetJSONWebKeysRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
+
+
+def test_get_json_web_keys_from_dict():
+    test_get_json_web_keys(request_type=dict)
+
+
+def test_get_json_web_keys_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        client.get_json_web_keys()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetJSONWebKeysRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_json_web_keys_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetJSONWebKeysRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse())
+        response = await client.get_json_web_keys(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetJSONWebKeysRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
+
+
+@pytest.mark.asyncio
+async def test_get_json_web_keys_async_from_dict():
+    await test_get_json_web_keys_async(request_type=dict)
+
+
+def test_get_json_web_keys_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetJSONWebKeysRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_json_web_keys),
+            '__call__') as call:
+        call.return_value = cluster_service.GetJSONWebKeysResponse()
+        client.get_json_web_keys(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_json_web_keys_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.GetJSONWebKeysRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_json_web_keys), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse()) + await client.get_json_web_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_get_node_pool(transport: str = 'grpc', request_type=cluster_service.GetNodePoolRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.NodePool( + name='name_value', + initial_node_count=1911, + locations=['locations_value'], + self_link='self_link_value', + version='version_value', + instance_group_urls=['instance_group_urls_value'], + status=cluster_service.NodePool.Status.PROVISIONING, + status_message='status_message_value', + pod_ipv4_cidr_size=1856, + ) + response = client.get_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.GetNodePoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.NodePool) + assert response.name == 'name_value' + assert response.initial_node_count == 1911 + assert response.locations == ['locations_value'] + assert response.self_link == 'self_link_value' + assert response.version == 'version_value' + assert response.instance_group_urls == ['instance_group_urls_value'] + assert response.status == cluster_service.NodePool.Status.PROVISIONING + assert response.status_message == 'status_message_value' + assert response.pod_ipv4_cidr_size == 1856 + + +def test_get_node_pool_from_dict(): + test_get_node_pool(request_type=dict) + + +def test_get_node_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
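+    # Patching type(client.transport.get_node_pool).__call__ intercepts the
+    # request at the stub layer, so no channel traffic is generated.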
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        client.get_node_pool()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetNodePoolRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetNodePoolRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool(
+            name='name_value',
+            initial_node_count=1911,
+            locations=['locations_value'],
+            self_link='self_link_value',
+            version='version_value',
+            instance_group_urls=['instance_group_urls_value'],
+            status=cluster_service.NodePool.Status.PROVISIONING,
+            status_message='status_message_value',
+            pod_ipv4_cidr_size=1856,
+        ))
+        response = await client.get_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.GetNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.NodePool)
+    assert response.name == 'name_value'
+    assert response.initial_node_count == 1911
+    assert response.locations == ['locations_value']
+    assert response.self_link == 'self_link_value'
+    assert response.version == 'version_value'
+    assert response.instance_group_urls == ['instance_group_urls_value']
+    assert response.status == cluster_service.NodePool.Status.PROVISIONING
+    assert response.status_message == 'status_message_value'
+    assert response.pod_ipv4_cidr_size == 1856
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_async_from_dict():
+    await test_get_node_pool_async(request_type=dict)
+
+
+def test_get_node_pool_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.GetNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        call.return_value = cluster_service.NodePool()
+        client.get_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
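+    # GetNodePool routes on the resource `name`; the list and create methods
+    # exercised above route on `parent` instead.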
+    request = cluster_service.GetNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool())
+        await client.get_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_get_node_pool_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.NodePool()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+
+
+def test_get_node_pool_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_node_pool(
+            cluster_service.GetNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_get_node_pool_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].node_pool_id == 'node_pool_id_value' + + +@pytest.mark.asyncio +async def test_get_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_node_pool( + cluster_service.GetNodePoolRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + ) + + +def test_create_node_pool(transport: str = 'grpc', request_type=cluster_service.CreateNodePoolRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.create_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CreateNodePoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_create_node_pool_from_dict(): + test_create_node_pool(request_type=dict) + + +def test_create_node_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        client.create_node_pool()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CreateNodePoolRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateNodePoolRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.create_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CreateNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_async_from_dict():
+    await test_create_node_pool_async(request_type=dict)
+
+
+def test_create_node_pool_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CreateNodePoolRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.create_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CreateNodePoolRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.create_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_create_node_pool_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool=cluster_service.NodePool(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool == cluster_service.NodePool(name='name_value')
+
+
+def test_create_node_pool_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_node_pool(
+            cluster_service.CreateNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool=cluster_service.NodePool(name='name_value'),
+        )
+
+
+@pytest.mark.asyncio
+async def test_create_node_pool_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+ response = await client.create_node_pool( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool=cluster_service.NodePool(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].node_pool == cluster_service.NodePool(name='name_value') + + +@pytest.mark.asyncio +async def test_create_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_node_pool( + cluster_service.CreateNodePoolRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool=cluster_service.NodePool(name='name_value'), + ) + + +def test_delete_node_pool(transport: str = 'grpc', request_type=cluster_service.DeleteNodePoolRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_node_pool), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.delete_node_pool(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.DeleteNodePoolRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_delete_node_pool_from_dict(): + test_delete_node_pool(request_type=dict) + + +def test_delete_node_pool_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        client.delete_node_pool()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.DeleteNodePoolRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteNodePoolRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.delete_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.DeleteNodePoolRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_async_from_dict():
+    await test_delete_node_pool_async(request_type=dict)
+
+
+def test_delete_node_pool_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.DeleteNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.delete_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.DeleteNodePoolRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.delete_node_pool(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_delete_node_pool_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+
+
+def test_delete_node_pool_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_node_pool(
+            cluster_service.DeleteNodePoolRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_delete_node_pool_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_node_pool),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_node_pool(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].node_pool_id == 'node_pool_id_value' + + +@pytest.mark.asyncio +async def test_delete_node_pool_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_node_pool( + cluster_service.DeleteNodePoolRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + ) + + +def test_rollback_node_pool_upgrade(transport: str = 'grpc', request_type=cluster_service.RollbackNodePoolUpgradeRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.rollback_node_pool_upgrade), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.rollback_node_pool_upgrade(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_rollback_node_pool_upgrade_from_dict(): + test_rollback_node_pool_upgrade(request_type=dict) + + +def test_rollback_node_pool_upgrade_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        client.rollback_node_pool_upgrade()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_async(transport: str = 'grpc_asyncio', request_type=cluster_service.RollbackNodePoolUpgradeRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.rollback_node_pool_upgrade(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_async_from_dict():
+    await test_rollback_node_pool_upgrade_async(request_type=dict)
+
+
+def test_rollback_node_pool_upgrade_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.RollbackNodePoolUpgradeRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.rollback_node_pool_upgrade(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.RollbackNodePoolUpgradeRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.rollback_node_pool_upgrade(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_rollback_node_pool_upgrade_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.rollback_node_pool_upgrade(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+
+
+def test_rollback_node_pool_upgrade_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.rollback_node_pool_upgrade(
+            cluster_service.RollbackNodePoolUpgradeRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_rollback_node_pool_upgrade_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.rollback_node_pool_upgrade),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
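+        # Note (added): awaiting the mocked RPC works because FakeUnaryUnaryCall
+        # (the grpc_helpers_async helper used throughout this module) returns an
+        # awaitable that behaves like a grpc.aio unary-unary call.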
+ response = await client.rollback_node_pool_upgrade( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].node_pool_id == 'node_pool_id_value' + + +@pytest.mark.asyncio +async def test_rollback_node_pool_upgrade_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.rollback_node_pool_upgrade( + cluster_service.RollbackNodePoolUpgradeRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + ) + + +def test_set_node_pool_management(transport: str = 'grpc', request_type=cluster_service.SetNodePoolManagementRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_management), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_node_pool_management(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolManagementRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_node_pool_management_from_dict(): + test_set_node_pool_management(request_type=dict) + + +def test_set_node_pool_management_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
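+    # Note (added): AnonymousCredentials are inert, so constructing the client
+    # here never falls back to an Application Default Credentials lookup on the
+    # test machine.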
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        client.set_node_pool_management()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolManagementRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_management_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolManagementRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_node_pool_management(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolManagementRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_management_async_from_dict():
+    await test_set_node_pool_management_async(request_type=dict)
+
+
+def test_set_node_pool_management_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetNodePoolManagementRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_node_pool_management(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_management_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetNodePoolManagementRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_node_pool_management(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_set_node_pool_management_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_node_pool_management(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            management=cluster_service.NodeManagement(auto_upgrade=True),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].node_pool_id == 'node_pool_id_value'
+        assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True)
+
+
+def test_set_node_pool_management_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_node_pool_management(
+            cluster_service.SetNodePoolManagementRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            node_pool_id='node_pool_id_value',
+            management=cluster_service.NodeManagement(auto_upgrade=True),
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_management_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_management),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
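+        # Note (added): the flattened keyword arguments below are coerced into a
+        # single SetNodePoolManagementRequest before the transport is invoked;
+        # the assertions afterwards inspect that coerced request as args[0].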
+ response = await client.set_node_pool_management( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + management=cluster_service.NodeManagement(auto_upgrade=True), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].node_pool_id == 'node_pool_id_value' + assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True) + + +@pytest.mark.asyncio +async def test_set_node_pool_management_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_node_pool_management( + cluster_service.SetNodePoolManagementRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + node_pool_id='node_pool_id_value', + management=cluster_service.NodeManagement(auto_upgrade=True), + ) + + +def test_set_labels(transport: str = 'grpc', request_type=cluster_service.SetLabelsRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_labels), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_labels(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLabelsRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_labels_from_dict(): + test_set_labels(request_type=dict) + + +def test_set_labels_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        client.set_labels()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLabelsRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_labels_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLabelsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_labels(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLabelsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_labels_async_from_dict():
+    await test_set_labels_async(request_type=dict)
+
+
+def test_set_labels_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLabelsRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_labels(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
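+    # Note (added): each mock_calls entry is a (name, args, kwargs) triple; the
+    # gRPC request metadata, including routing headers, travels in kwargs['metadata'].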
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_labels_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLabelsRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_labels(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_set_labels_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_labels(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            resource_labels={'key_value': 'value_value'},
+            label_fingerprint='label_fingerprint_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].resource_labels == {'key_value': 'value_value'}
+        assert args[0].label_fingerprint == 'label_fingerprint_value'
+
+
+def test_set_labels_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_labels(
+            cluster_service.SetLabelsRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            resource_labels={'key_value': 'value_value'},
+            label_fingerprint='label_fingerprint_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_labels_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_labels),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+ response = await client.set_labels( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + resource_labels={'key_value': 'value_value'}, + label_fingerprint='label_fingerprint_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].resource_labels == {'key_value': 'value_value'} + assert args[0].label_fingerprint == 'label_fingerprint_value' + + +@pytest.mark.asyncio +async def test_set_labels_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_labels( + cluster_service.SetLabelsRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + resource_labels={'key_value': 'value_value'}, + label_fingerprint='label_fingerprint_value', + ) + + +def test_set_legacy_abac(transport: str = 'grpc', request_type=cluster_service.SetLegacyAbacRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_legacy_abac), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_legacy_abac(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetLegacyAbacRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_legacy_abac_from_dict(): + test_set_legacy_abac(request_type=dict) + + +def test_set_legacy_abac_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        client.set_legacy_abac()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLegacyAbacRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLegacyAbacRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetLegacyAbacRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_async_from_dict():
+    await test_set_legacy_abac_async(request_type=dict)
+
+
+def test_set_legacy_abac_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLegacyAbacRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
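+    # Note (added): 'x-goog-request-params' is the metadata key GAPIC clients
+    # use to carry URI-derived routing parameters (here, name=name/value) to
+    # the server.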
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetLegacyAbacRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.set_legacy_abac(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_set_legacy_abac_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_legacy_abac(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].enabled is True
+
+
+def test_set_legacy_abac_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_legacy_abac(
+            cluster_service.SetLegacyAbacRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+        )
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_legacy_abac),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_legacy_abac(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].enabled is True
+
+
+@pytest.mark.asyncio
+async def test_set_legacy_abac_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_legacy_abac(
+            cluster_service.SetLegacyAbacRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            enabled=True,
+        )
+
+
+def test_start_ip_rotation(transport: str = 'grpc', request_type=cluster_service.StartIPRotationRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        )
+        response = client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.StartIPRotationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+def test_start_ip_rotation_from_dict():
+    test_start_ip_rotation(request_type=dict)
+
+
+def test_start_ip_rotation_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
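+    # Note (added): invoking the method with neither a request object nor
+    # flattened fields should still work; the client synthesizes a default
+    # StartIPRotationRequest, which the final assertion below confirms.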
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        client.start_ip_rotation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.StartIPRotationRequest()
+
+
+@pytest.mark.asyncio
+async def test_start_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.StartIPRotationRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.StartIPRotationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_start_ip_rotation_async_from_dict():
+    await test_start_ip_rotation_async(request_type=dict)
+
+
+def test_start_ip_rotation_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.StartIPRotationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_start_ip_rotation_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.StartIPRotationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.start_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_start_ip_rotation_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.start_ip_rotation(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+
+
+def test_start_ip_rotation_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.start_ip_rotation(
+            cluster_service.StartIPRotationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_start_ip_rotation_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.start_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.start_ip_rotation(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + + +@pytest.mark.asyncio +async def test_start_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.start_ip_rotation( + cluster_service.StartIPRotationRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + ) + + +def test_complete_ip_rotation(transport: str = 'grpc', request_type=cluster_service.CompleteIPRotationRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.complete_ip_rotation), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.complete_ip_rotation(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.CompleteIPRotationRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_complete_ip_rotation_from_dict(): + test_complete_ip_rotation(request_type=dict) + + +def test_complete_ip_rotation_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.complete_ip_rotation),
+            '__call__') as call:
+        client.complete_ip_rotation()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CompleteIPRotationRequest()
+
+
+@pytest.mark.asyncio
+async def test_complete_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CompleteIPRotationRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.complete_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.CompleteIPRotationRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_complete_ip_rotation_async_from_dict():
+    await test_complete_ip_rotation_async(request_type=dict)
+
+
+def test_complete_ip_rotation_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CompleteIPRotationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_ip_rotation),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.complete_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_complete_ip_rotation_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.CompleteIPRotationRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_ip_rotation),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        await client.complete_ip_rotation(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name/value',
+    ) in kw['metadata']
+
+
+def test_complete_ip_rotation_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.Operation()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.complete_ip_rotation(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+
+
+def test_complete_ip_rotation_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.complete_ip_rotation(
+            cluster_service.CompleteIPRotationRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_complete_ip_rotation_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.complete_ip_rotation),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.complete_ip_rotation(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
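+        # Note (added): for the async client the test only requires that at
+        # least one call was recorded; args[0] of the first recorded call is
+        # the coerced request.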
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + + +@pytest.mark.asyncio +async def test_complete_ip_rotation_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.complete_ip_rotation( + cluster_service.CompleteIPRotationRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + ) + + +def test_set_node_pool_size(transport: str = 'grpc', request_type=cluster_service.SetNodePoolSizeRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNodePoolSizeRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_node_pool_size_from_dict(): + test_set_node_pool_size(request_type=dict) + + +def test_set_node_pool_size_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.set_node_pool_size),
+            '__call__') as call:
+        client.set_node_pool_size()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolSizeRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_size_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolSizeRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_size),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_node_pool_size(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNodePoolSizeRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_node_pool_size_async_from_dict():
+    await test_set_node_pool_size_async(request_type=dict)
+
+
+def test_set_node_pool_size_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetNodePoolSizeRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_node_pool_size),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_node_pool_size(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
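+    # NOTE (illustrative aside, not generator output): 'x-goog-request-params'
+    # is the routing header GAPIC clients attach so the backend can route the
+    # request by resource. The client serializes the relevant request field
+    # into a single metadata entry; a minimal sketch with a made-up resource:
+    #
+    #     metadata = (('x-goog-request-params', 'name=projects/p/zones/z/clusters/c'),)
+    #
+    # The assertions below unpack the mocked call's keyword arguments and
+    # check that exactly such an entry was sent.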
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_node_pool_size_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNodePoolSizeRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_node_pool_size), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_node_pool_size(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_network_policy(transport: str = 'grpc', request_type=cluster_service.SetNetworkPolicyRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetNetworkPolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_network_policy_from_dict(): + test_set_network_policy(request_type=dict) + + +def test_set_network_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_network_policy),
+            '__call__') as call:
+        client.set_network_policy()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNetworkPolicyRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_network_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNetworkPolicyRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_network_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_network_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetNetworkPolicyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_network_policy_async_from_dict():
+    await test_set_network_policy_async(request_type=dict)
+
+
+def test_set_network_policy_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetNetworkPolicyRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_network_policy),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_network_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_network_policy_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetNetworkPolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_network_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_network_policy_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_network_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) + + +def test_set_network_policy_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + ) + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_network_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
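+        # NOTE (illustrative aside, not generator output): the flattened
+        # keyword arguments below are folded into a request message by the
+        # client, so this call is equivalent to passing
+        # cluster_service.SetNetworkPolicyRequest(project_id=..., zone=...,
+        # cluster_id=..., network_policy=...) as the only positional
+        # argument; mixing both styles raises ValueError, as the
+        # *_flattened_error tests in this file verify.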
+ response = await client.set_network_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) + + +@pytest.mark.asyncio +async def test_set_network_policy_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.set_network_policy( + cluster_service.SetNetworkPolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), + ) + + +def test_set_maintenance_policy(transport: str = 'grpc', request_type=cluster_service.SetMaintenancePolicyRequest): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation( + name='name_value', + zone='zone_value', + operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, + status=cluster_service.Operation.Status.PENDING, + detail='detail_value', + status_message='status_message_value', + self_link='self_link_value', + target_link='target_link_value', + location='location_value', + start_time='start_time_value', + end_time='end_time_value', + ) + response = client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == cluster_service.SetMaintenancePolicyRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, cluster_service.Operation) + assert response.name == 'name_value' + assert response.zone == 'zone_value' + assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER + assert response.status == cluster_service.Operation.Status.PENDING + assert response.detail == 'detail_value' + assert response.status_message == 'status_message_value' + assert response.self_link == 'self_link_value' + assert response.target_link == 'target_link_value' + assert response.location == 'location_value' + assert response.start_time == 'start_time_value' + assert response.end_time == 'end_time_value' + + +def test_set_maintenance_policy_from_dict(): + test_set_maintenance_policy(request_type=dict) + + +def test_set_maintenance_policy_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. 
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_maintenance_policy),
+            '__call__') as call:
+        client.set_maintenance_policy()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetMaintenancePolicyRequest()
+
+
+@pytest.mark.asyncio
+async def test_set_maintenance_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMaintenancePolicyRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_maintenance_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
+            name='name_value',
+            zone='zone_value',
+            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
+            status=cluster_service.Operation.Status.PENDING,
+            detail='detail_value',
+            status_message='status_message_value',
+            self_link='self_link_value',
+            target_link='target_link_value',
+            location='location_value',
+            start_time='start_time_value',
+            end_time='end_time_value',
+        ))
+        response = await client.set_maintenance_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.SetMaintenancePolicyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.Operation)
+    assert response.name == 'name_value'
+    assert response.zone == 'zone_value'
+    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
+    assert response.status == cluster_service.Operation.Status.PENDING
+    assert response.detail == 'detail_value'
+    assert response.status_message == 'status_message_value'
+    assert response.self_link == 'self_link_value'
+    assert response.target_link == 'target_link_value'
+    assert response.location == 'location_value'
+    assert response.start_time == 'start_time_value'
+    assert response.end_time == 'end_time_value'
+
+
+@pytest.mark.asyncio
+async def test_set_maintenance_policy_async_from_dict():
+    await test_set_maintenance_policy_async(request_type=dict)
+
+
+def test_set_maintenance_policy_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.SetMaintenancePolicyRequest()
+
+    request.name = 'name/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_maintenance_policy),
+            '__call__') as call:
+        call.return_value = cluster_service.Operation()
+        client.set_maintenance_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_field_headers_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = cluster_service.SetMaintenancePolicyRequest() + + request.name = 'name/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) + await client.set_maintenance_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name/value', + ) in kw['metadata'] + + +def test_set_maintenance_policy_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.Operation() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_maintenance_policy( + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].project_id == 'project_id_value' + assert args[0].zone == 'zone_value' + assert args[0].cluster_id == 'cluster_id_value' + assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))) + + +def test_set_maintenance_policy_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_maintenance_policy( + cluster_service.SetMaintenancePolicyRequest(), + project_id='project_id_value', + zone='zone_value', + cluster_id='cluster_id_value', + maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), + ) + + +@pytest.mark.asyncio +async def test_set_maintenance_policy_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_maintenance_policy), + '__call__') as call: + # Designate an appropriate return value for the call. 
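+        # NOTE (illustrative aside, not generator output): the async client
+        # awaits the transport call, so async tests wrap the canned response
+        # in grpc_helpers_async.FakeUnaryUnaryCall, an awaitable that
+        # resolves to the wrapped message. A minimal sketch:
+        #
+        #     call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+        #         cluster_service.Operation(name='some-operation'))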
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_maintenance_policy(
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].project_id == 'project_id_value'
+        assert args[0].zone == 'zone_value'
+        assert args[0].cluster_id == 'cluster_id_value'
+        assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value')))
+
+
+@pytest.mark.asyncio
+async def test_set_maintenance_policy_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_maintenance_policy(
+            cluster_service.SetMaintenancePolicyRequest(),
+            project_id='project_id_value',
+            zone='zone_value',
+            cluster_id='cluster_id_value',
+            maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))),
+        )
+
+
+def test_list_usable_subnetworks(transport: str = 'grpc', request_type=cluster_service.ListUsableSubnetworksRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListUsableSubnetworksResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_usable_subnetworks(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListUsableSubnetworksPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_usable_subnetworks_from_dict():
+    test_list_usable_subnetworks(request_type=dict)
+
+
+def test_list_usable_subnetworks_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        client.list_usable_subnetworks()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListUsableSubnetworksRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_usable_subnetworks(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_async_from_dict():
+    await test_list_usable_subnetworks_async(request_type=dict)
+
+
+def test_list_usable_subnetworks_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListUsableSubnetworksRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        call.return_value = cluster_service.ListUsableSubnetworksResponse()
+        client.list_usable_subnetworks(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListUsableSubnetworksRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse())
+        await client.list_usable_subnetworks(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+def test_list_usable_subnetworks_flattened():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListUsableSubnetworksResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_usable_subnetworks(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+def test_list_usable_subnetworks_flattened_error():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_usable_subnetworks(
+            cluster_service.ListUsableSubnetworksRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_flattened_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_usable_subnetworks(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0].parent == 'parent_value'
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_flattened_error_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_usable_subnetworks(
+            cluster_service.ListUsableSubnetworksRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_usable_subnetworks_pager():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        # Set the response to a series of pages.
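+        # NOTE (illustrative aside, not generator output): each entry in
+        # side_effect is returned by one underlying RPC, so the pager fetches
+        # pages lazily as iteration crosses page boundaries; the trailing
+        # RuntimeError makes the test fail loudly if the pager were to ask
+        # for a fifth page after the final empty next_page_token.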
+        call.side_effect = (
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='abc',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[],
+                next_page_token='def',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='ghi',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_usable_subnetworks(request={})
+
+        assert pager._metadata == metadata
+
+        results = [i for i in pager]
+        assert len(results) == 6
+        assert all(isinstance(i, cluster_service.UsableSubnetwork)
+                   for i in results)
+
+
+def test_list_usable_subnetworks_pages():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='abc',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[],
+                next_page_token='def',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='ghi',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_usable_subnetworks(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_async_pager():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='abc',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[],
+                next_page_token='def',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='ghi',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_usable_subnetworks(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, cluster_service.UsableSubnetwork)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_usable_subnetworks_async_pages():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_usable_subnetworks),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='abc',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[],
+                next_page_token='def',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                ],
+                next_page_token='ghi',
+            ),
+            cluster_service.ListUsableSubnetworksResponse(
+                subnetworks=[
+                    cluster_service.UsableSubnetwork(),
+                    cluster_service.UsableSubnetwork(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        async for page_ in (await client.list_usable_subnetworks(request={})).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_list_locations(transport: str = 'grpc', request_type=cluster_service.ListLocationsRequest):
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_locations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = cluster_service.ListLocationsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListLocationsRequest()
+
+    # Establish that the response is the type that we expect.
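+    # NOTE (aside, inferred from the assertion below): ListLocations is not
+    # wrapped in a pager in this surface, so raw_page on the response simply
+    # returns the response object itself.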
+    assert response.raw_page is response
+    assert isinstance(response, cluster_service.ListLocationsResponse)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_locations_from_dict():
+    test_list_locations(request_type=dict)
+
+
+def test_list_locations_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_locations),
+            '__call__') as call:
+        client.list_locations()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListLocationsRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListLocationsRequest):
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_locations),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListLocationsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == cluster_service.ListLocationsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, cluster_service.ListLocationsResponse)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_locations_async_from_dict():
+    await test_list_locations_async(request_type=dict)
+
+
+def test_list_locations_field_headers():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = cluster_service.ListLocationsRequest()
+
+    request.parent = 'parent/value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_locations),
+            '__call__') as call:
+        call.return_value = cluster_service.ListLocationsResponse()
+        client.list_locations(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent/value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_locations_field_headers_async():
+    client = ClusterManagerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+ request = cluster_service.ListLocationsRequest() + + request.parent = 'parent/value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_locations), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListLocationsResponse()) + await client.list_locations(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent/value', + ) in kw['metadata'] + + +def test_list_locations_flattened(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_locations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListLocationsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_locations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +def test_list_locations_flattened_error(): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_locations( + cluster_service.ListLocationsRequest(), + parent='parent_value', + ) + + +@pytest.mark.asyncio +async def test_list_locations_flattened_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_locations), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = cluster_service.ListLocationsResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListLocationsResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.list_locations( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0].parent == 'parent_value' + + +@pytest.mark.asyncio +async def test_list_locations_flattened_error_async(): + client = ClusterManagerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.list_locations( + cluster_service.ListLocationsRequest(), + parent='parent_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ClusterManagerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ClusterManagerClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.ClusterManagerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.ClusterManagerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.ClusterManagerGrpcTransport, + ) + +def test_cluster_manager_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ClusterManagerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_cluster_manager_base_transport(): + # Instantiate the base transport. + with mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ClusterManagerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
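+    # NOTE (illustrative aside): ClusterManagerTransport is the abstract
+    # base class; every RPC is exposed as a property that raises
+    # NotImplementedError until a concrete transport (gRPC or gRPC AsyncIO)
+    # overrides it, which the loop below checks method by method.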
+ methods = ( + 'list_clusters', + 'get_cluster', + 'create_cluster', + 'update_cluster', + 'update_node_pool', + 'set_node_pool_autoscaling', + 'set_logging_service', + 'set_monitoring_service', + 'set_addons_config', + 'set_locations', + 'update_master', + 'set_master_auth', + 'delete_cluster', + 'list_operations', + 'get_operation', + 'cancel_operation', + 'get_server_config', + 'list_node_pools', + 'get_json_web_keys', + 'get_node_pool', + 'create_node_pool', + 'delete_node_pool', + 'rollback_node_pool_upgrade', + 'set_node_pool_management', + 'set_labels', + 'set_legacy_abac', + 'start_ip_rotation', + 'complete_ip_rotation', + 'set_node_pool_size', + 'set_network_policy', + 'set_maintenance_policy', + 'list_usable_subnetworks', + 'list_locations', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + +@requires_google_auth_gte_1_25_0 +def test_cluster_manager_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_manager_base_transport_with_credentials_file_old_google_auth(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", scopes=( + 'https://www.googleapis.com/auth/cloud-platform', + ), + quota_project_id="octopus", + ) + + +def test_cluster_manager_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ClusterManagerTransport() + adc.assert_called_once() + + +@requires_google_auth_gte_1_25_0 +def test_cluster_manager_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterManagerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +@requires_google_auth_lt_1_25_0 +def test_cluster_manager_auth_adc_old_google_auth(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ClusterManagerClient() + adc.assert_called_once_with( + scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_gte_1_25_0 +def test_cluster_manager_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.ClusterManagerGrpcTransport, + transports.ClusterManagerGrpcAsyncIOTransport, + ], +) +@requires_google_auth_lt_1_25_0 +def test_cluster_manager_transport_auth_adc_old_google_auth(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus") + adc.assert_called_once_with(scopes=( + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.ClusterManagerGrpcTransport, grpc_helpers), + (transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
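+    # NOTE (general background, hedged): google.auth.default() resolves
+    # Application Default Credentials by checking, roughly in order, the
+    # GOOGLE_APPLICATION_CREDENTIALS environment variable, gcloud user
+    # credentials, and the GCE metadata server; it is mocked here so no
+    # real credential lookup ever happens in the test.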
+    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
+        grpc_helpers, "create_channel", autospec=True
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        adc.return_value = (creds, None)
+        transport_class(
+            quota_project_id="octopus",
+            scopes=["1", "2"]
+        )
+
+        create_channel.assert_called_with(
+            "container.googleapis.com:443",
+            credentials=creds,
+            credentials_file=None,
+            quota_project_id="octopus",
+            default_scopes=(
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            scopes=["1", "2"],
+            default_host="container.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport])
+def test_cluster_manager_grpc_transport_client_cert_source_for_mtls(
+    transport_class
+):
+    cred = ga_credentials.AnonymousCredentials()
+
+    # Check ssl_channel_credentials is used if provided.
+    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+        mock_ssl_channel_creds = mock.Mock()
+        transport_class(
+            host="squid.clam.whelk",
+            credentials=cred,
+            ssl_channel_credentials=mock_ssl_channel_creds
+        )
+        mock_create_channel.assert_called_once_with(
+            "squid.clam.whelk:443",
+            credentials=cred,
+            credentials_file=None,
+            scopes=None,
+            ssl_credentials=mock_ssl_channel_creds,
+            quota_project_id=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+    # is used.
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert,
+                private_key=expected_key
+            )
+
+
+def test_cluster_manager_host_no_port():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com'),
+    )
+    assert client.transport._host == 'container.googleapis.com:443'
+
+
+def test_cluster_manager_host_with_port():
+    client = ClusterManagerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'container.googleapis.com:8000'
+
+
+def test_cluster_manager_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.ClusterManagerGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_cluster_manager_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
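+    # NOTE (illustrative aside): supplying a ready-made channel bypasses
+    # channel creation (and therefore credentials) entirely; the host is
+    # still recorded with the default ':443' suffix but is never dialed.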
+ transport = transports.ClusterManagerGrpcAsyncIOTransport( + host="squid.clam.whelk", + channel=channel, + ) + assert transport.grpc_channel == channel + assert transport._host == "squid.clam.whelk:443" + assert transport._ssl_channel_credentials == None + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) +def test_cluster_manager_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. 
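+# (Unlike the previous test, this variant resolves the client certificate
+# through google.auth.transport.grpc.SslCredentials rather than an explicit
+# client_cert_source callback.)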
+@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) +def test_cluster_manager_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_topic_path(): + project = "squid" + topic = "clam" + expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic, ) + actual = ClusterManagerClient.topic_path(project, topic) + assert expected == actual + + +def test_parse_topic_path(): + expected = { + "project": "whelk", + "topic": "octopus", + } + path = ClusterManagerClient.topic_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_topic_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "oyster" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ClusterManagerClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "nudibranch", + } + path = ClusterManagerClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "cuttlefish" + expected = "folders/{folder}".format(folder=folder, ) + actual = ClusterManagerClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "mussel", + } + path = ClusterManagerClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "winkle" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ClusterManagerClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nautilus", + } + path = ClusterManagerClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
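+    # (The same round-trip convention applies to each *_path/parse_*_path
+    # pair in the tests that follow.)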
+ actual = ClusterManagerClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "scallop" + expected = "projects/{project}".format(project=project, ) + actual = ClusterManagerClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "abalone", + } + path = ClusterManagerClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "squid" + location = "clam" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ClusterManagerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "whelk", + "location": "octopus", + } + path = ClusterManagerClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterManagerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: + client = ClusterManagerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: + transport_class = ClusterManagerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) From 7c317f85a1e07bbf61b9fbef7b4952afa81e07ed Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Fri, 23 Jul 2021 21:22:34 +0000 Subject: [PATCH 2/2] =?UTF-8?q?=F0=9F=A6=89=20Updates=20from=20OwlBot?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit See https://github.com/googleapis/repo-automation-bots/blob/master/packages/owl-bot/README.md --- .../services/cluster_manager/client.py | 4 + .../services/cluster_manager/client.py | 4 + owl-bot-staging/v1/.coveragerc | 17 - owl-bot-staging/v1/MANIFEST.in | 2 - owl-bot-staging/v1/README.rst | 49 - owl-bot-staging/v1/docs/conf.py | 376 - .../v1/docs/container_v1/cluster_manager.rst | 10 - .../v1/docs/container_v1/services.rst | 6 - .../v1/docs/container_v1/types.rst | 7 - owl-bot-staging/v1/docs/index.rst | 7 - .../v1/google/container/__init__.py | 213 - owl-bot-staging/v1/google/container/py.typed | 2 - .../v1/google/container_v1/__init__.py | 214 - .../google/container_v1/gapic_metadata.json | 343 - .../v1/google/container_v1/py.typed | 2 - .../google/container_v1/services/__init__.py | 15 - .../services/cluster_manager/__init__.py | 22 - .../services/cluster_manager/async_client.py | 3604 ------ .../services/cluster_manager/client.py | 3731 ------- .../services/cluster_manager/pagers.py | 140 - .../cluster_manager/transports/__init__.py | 33 - .../cluster_manager/transports/base.py | 666 -- .../cluster_manager/transports/grpc.py | 1097 -- .../transports/grpc_asyncio.py | 1101 -- .../v1/google/container_v1/types/__init__.py | 210 - .../container_v1/types/cluster_service.py | 5120 --------- owl-bot-staging/v1/mypy.ini | 3 - owl-bot-staging/v1/noxfile.py | 132 - 
.../v1/scripts/fixup_container_v1_keywords.py | 207 - owl-bot-staging/v1/setup.py | 54 - owl-bot-staging/v1/tests/__init__.py | 16 - owl-bot-staging/v1/tests/unit/__init__.py | 16 - .../v1/tests/unit/gapic/__init__.py | 16 - .../tests/unit/gapic/container_v1/__init__.py | 16 - .../container_v1/test_cluster_manager.py | 9434 ---------------- owl-bot-staging/v1beta1/.coveragerc | 17 - owl-bot-staging/v1beta1/MANIFEST.in | 2 - owl-bot-staging/v1beta1/README.rst | 49 - owl-bot-staging/v1beta1/docs/conf.py | 376 - .../container_v1beta1/cluster_manager.rst | 10 - .../docs/container_v1beta1/services.rst | 6 - .../v1beta1/docs/container_v1beta1/types.rst | 7 - owl-bot-staging/v1beta1/docs/index.rst | 7 - .../v1beta1/google/container/__init__.py | 249 - .../v1beta1/google/container/py.typed | 2 - .../google/container_v1beta1/__init__.py | 250 - .../container_v1beta1/gapic_metadata.json | 353 - .../v1beta1/google/container_v1beta1/py.typed | 2 - .../container_v1beta1/services/__init__.py | 15 - .../services/cluster_manager/__init__.py | 22 - .../services/cluster_manager/async_client.py | 3632 ------ .../services/cluster_manager/client.py | 3750 ------- .../services/cluster_manager/pagers.py | 140 - .../cluster_manager/transports/__init__.py | 33 - .../cluster_manager/transports/base.py | 694 -- .../cluster_manager/transports/grpc.py | 1124 -- .../transports/grpc_asyncio.py | 1128 -- .../container_v1beta1/types/__init__.py | 246 - .../types/cluster_service.py | 5866 ---------- owl-bot-staging/v1beta1/mypy.ini | 3 - owl-bot-staging/v1beta1/noxfile.py | 132 - .../fixup_container_v1beta1_keywords.py | 208 - owl-bot-staging/v1beta1/setup.py | 54 - owl-bot-staging/v1beta1/tests/__init__.py | 16 - .../v1beta1/tests/unit/__init__.py | 16 - .../v1beta1/tests/unit/gapic/__init__.py | 16 - .../unit/gapic/container_v1beta1/__init__.py | 16 - .../container_v1beta1/test_cluster_manager.py | 9846 ----------------- .../container_v1/test_cluster_manager.py | 31 +- .../container_v1beta1/test_cluster_manager.py | 31 +- 70 files changed, 44 insertions(+), 55194 deletions(-) delete mode 100644 owl-bot-staging/v1/.coveragerc delete mode 100644 owl-bot-staging/v1/MANIFEST.in delete mode 100644 owl-bot-staging/v1/README.rst delete mode 100644 owl-bot-staging/v1/docs/conf.py delete mode 100644 owl-bot-staging/v1/docs/container_v1/cluster_manager.rst delete mode 100644 owl-bot-staging/v1/docs/container_v1/services.rst delete mode 100644 owl-bot-staging/v1/docs/container_v1/types.rst delete mode 100644 owl-bot-staging/v1/docs/index.rst delete mode 100644 owl-bot-staging/v1/google/container/__init__.py delete mode 100644 owl-bot-staging/v1/google/container/py.typed delete mode 100644 owl-bot-staging/v1/google/container_v1/__init__.py delete mode 100644 owl-bot-staging/v1/google/container_v1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1/google/container_v1/py.typed delete mode 100644 owl-bot-staging/v1/google/container_v1/services/__init__.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py delete mode 100644 
owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py delete mode 100644 owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1/google/container_v1/types/__init__.py delete mode 100644 owl-bot-staging/v1/google/container_v1/types/cluster_service.py delete mode 100644 owl-bot-staging/v1/mypy.ini delete mode 100644 owl-bot-staging/v1/noxfile.py delete mode 100644 owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py delete mode 100644 owl-bot-staging/v1/setup.py delete mode 100644 owl-bot-staging/v1/tests/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py delete mode 100644 owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py delete mode 100644 owl-bot-staging/v1beta1/.coveragerc delete mode 100644 owl-bot-staging/v1beta1/MANIFEST.in delete mode 100644 owl-bot-staging/v1beta1/README.rst delete mode 100644 owl-bot-staging/v1beta1/docs/conf.py delete mode 100644 owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst delete mode 100644 owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst delete mode 100644 owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst delete mode 100644 owl-bot-staging/v1beta1/docs/index.rst delete mode 100644 owl-bot-staging/v1beta1/google/container/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/container/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/py.typed delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py delete mode 100644 owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py delete mode 100644 owl-bot-staging/v1beta1/mypy.ini delete mode 100644 owl-bot-staging/v1beta1/noxfile.py delete mode 100644 owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py delete mode 100644 owl-bot-staging/v1beta1/setup.py delete mode 100644 owl-bot-staging/v1beta1/tests/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py delete mode 100644 
owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py delete mode 100644 owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py diff --git a/google/cloud/container_v1/services/cluster_manager/client.py b/google/cloud/container_v1/services/cluster_manager/client.py index fe0a1abf..c3221cca 100644 --- a/google/cloud/container_v1/services/cluster_manager/client.py +++ b/google/cloud/container_v1/services/cluster_manager/client.py @@ -329,6 +329,10 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), ) def list_clusters( diff --git a/google/cloud/container_v1beta1/services/cluster_manager/client.py b/google/cloud/container_v1beta1/services/cluster_manager/client.py index cba81caf..6a8b3934 100644 --- a/google/cloud/container_v1beta1/services/cluster_manager/client.py +++ b/google/cloud/container_v1beta1/services/cluster_manager/client.py @@ -341,6 +341,10 @@ def __init__( client_cert_source_for_mtls=client_cert_source_func, quota_project_id=client_options.quota_project_id, client_info=client_info, + always_use_jwt_access=( + Transport == type(self).get_transport_class("grpc") + or Transport == type(self).get_transport_class("grpc_asyncio") + ), ) def list_clusters( diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc deleted file mode 100644 index f0a87b59..00000000 --- a/owl-bot-staging/v1/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/container/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in deleted file mode 100644 index cd146430..00000000 --- a/owl-bot-staging/v1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/container *.py -recursive-include google/container_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst deleted file mode 100644 index 83d9858c..00000000 --- a/owl-bot-staging/v1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Container API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Container API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. 
- -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py deleted file mode 100644 index 1f19408e..00000000 --- a/owl-bot-staging/v1/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-container documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "1.6.3" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = "index" - -# General information about the project. -project = u"google-container" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. 
-# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. 
-# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-container-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - master_doc, - "google-container.tex", - u"google-container Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. 
-# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-container", - u"Google Container Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-container", - u"google-container Documentation", - author, - "google-container", - "GAPIC library for Google Container API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/container_v1/cluster_manager.rst b/owl-bot-staging/v1/docs/container_v1/cluster_manager.rst deleted file mode 100644 index 016a460e..00000000 --- a/owl-bot-staging/v1/docs/container_v1/cluster_manager.rst +++ /dev/null @@ -1,10 +0,0 @@ -ClusterManager --------------------------------- - -.. automodule:: google.container_v1.services.cluster_manager - :members: - :inherited-members: - -.. automodule:: google.container_v1.services.cluster_manager.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1/docs/container_v1/services.rst b/owl-bot-staging/v1/docs/container_v1/services.rst deleted file mode 100644 index faa067b2..00000000 --- a/owl-bot-staging/v1/docs/container_v1/services.rst +++ /dev/null @@ -1,6 +0,0 @@ -Services for Google Container v1 API -==================================== -.. 
toctree:: - :maxdepth: 2 - - cluster_manager diff --git a/owl-bot-staging/v1/docs/container_v1/types.rst b/owl-bot-staging/v1/docs/container_v1/types.rst deleted file mode 100644 index 97997d9c..00000000 --- a/owl-bot-staging/v1/docs/container_v1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Container v1 API -================================= - -.. automodule:: google.container_v1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst deleted file mode 100644 index 661ade54..00000000 --- a/owl-bot-staging/v1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - container_v1/services - container_v1/types diff --git a/owl-bot-staging/v1/google/container/__init__.py b/owl-bot-staging/v1/google/container/__init__.py deleted file mode 100644 index 3a1cd2e9..00000000 --- a/owl-bot-staging/v1/google/container/__init__.py +++ /dev/null @@ -1,213 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# - -from google.container_v1.services.cluster_manager.client import ClusterManagerClient -from google.container_v1.services.cluster_manager.async_client import ClusterManagerAsyncClient - -from google.container_v1.types.cluster_service import AcceleratorConfig -from google.container_v1.types.cluster_service import AddonsConfig -from google.container_v1.types.cluster_service import AuthenticatorGroupsConfig -from google.container_v1.types.cluster_service import AutoprovisioningNodePoolDefaults -from google.container_v1.types.cluster_service import AutoUpgradeOptions -from google.container_v1.types.cluster_service import BinaryAuthorization -from google.container_v1.types.cluster_service import CancelOperationRequest -from google.container_v1.types.cluster_service import ClientCertificateConfig -from google.container_v1.types.cluster_service import CloudRunConfig -from google.container_v1.types.cluster_service import Cluster -from google.container_v1.types.cluster_service import ClusterAutoscaling -from google.container_v1.types.cluster_service import ClusterUpdate -from google.container_v1.types.cluster_service import CompleteIPRotationRequest -from google.container_v1.types.cluster_service import ConfigConnectorConfig -from google.container_v1.types.cluster_service import CreateClusterRequest -from google.container_v1.types.cluster_service import CreateNodePoolRequest -from google.container_v1.types.cluster_service import DailyMaintenanceWindow -from google.container_v1.types.cluster_service import DatabaseEncryption -from google.container_v1.types.cluster_service import DefaultSnatStatus -from google.container_v1.types.cluster_service import DeleteClusterRequest -from google.container_v1.types.cluster_service import DeleteNodePoolRequest -from google.container_v1.types.cluster_service import DnsCacheConfig -from google.container_v1.types.cluster_service import GetClusterRequest -from 
google.container_v1.types.cluster_service import GetJSONWebKeysRequest -from google.container_v1.types.cluster_service import GetJSONWebKeysResponse -from google.container_v1.types.cluster_service import GetNodePoolRequest -from google.container_v1.types.cluster_service import GetOpenIDConfigRequest -from google.container_v1.types.cluster_service import GetOpenIDConfigResponse -from google.container_v1.types.cluster_service import GetOperationRequest -from google.container_v1.types.cluster_service import GetServerConfigRequest -from google.container_v1.types.cluster_service import HorizontalPodAutoscaling -from google.container_v1.types.cluster_service import HttpLoadBalancing -from google.container_v1.types.cluster_service import IntraNodeVisibilityConfig -from google.container_v1.types.cluster_service import IPAllocationPolicy -from google.container_v1.types.cluster_service import Jwk -from google.container_v1.types.cluster_service import KubernetesDashboard -from google.container_v1.types.cluster_service import LegacyAbac -from google.container_v1.types.cluster_service import ListClustersRequest -from google.container_v1.types.cluster_service import ListClustersResponse -from google.container_v1.types.cluster_service import ListNodePoolsRequest -from google.container_v1.types.cluster_service import ListNodePoolsResponse -from google.container_v1.types.cluster_service import ListOperationsRequest -from google.container_v1.types.cluster_service import ListOperationsResponse -from google.container_v1.types.cluster_service import ListUsableSubnetworksRequest -from google.container_v1.types.cluster_service import ListUsableSubnetworksResponse -from google.container_v1.types.cluster_service import MaintenancePolicy -from google.container_v1.types.cluster_service import MaintenanceWindow -from google.container_v1.types.cluster_service import MasterAuth -from google.container_v1.types.cluster_service import MasterAuthorizedNetworksConfig -from google.container_v1.types.cluster_service import MaxPodsConstraint -from google.container_v1.types.cluster_service import NetworkConfig -from google.container_v1.types.cluster_service import NetworkPolicy -from google.container_v1.types.cluster_service import NetworkPolicyConfig -from google.container_v1.types.cluster_service import NodeConfig -from google.container_v1.types.cluster_service import NodeManagement -from google.container_v1.types.cluster_service import NodePool -from google.container_v1.types.cluster_service import NodePoolAutoscaling -from google.container_v1.types.cluster_service import NodeTaint -from google.container_v1.types.cluster_service import Operation -from google.container_v1.types.cluster_service import OperationProgress -from google.container_v1.types.cluster_service import PrivateClusterConfig -from google.container_v1.types.cluster_service import PrivateClusterMasterGlobalAccessConfig -from google.container_v1.types.cluster_service import RecurringTimeWindow -from google.container_v1.types.cluster_service import ReleaseChannel -from google.container_v1.types.cluster_service import ReservationAffinity -from google.container_v1.types.cluster_service import ResourceLimit -from google.container_v1.types.cluster_service import ResourceUsageExportConfig -from google.container_v1.types.cluster_service import RollbackNodePoolUpgradeRequest -from google.container_v1.types.cluster_service import SandboxConfig -from google.container_v1.types.cluster_service import ServerConfig -from google.container_v1.types.cluster_service import 
SetAddonsConfigRequest -from google.container_v1.types.cluster_service import SetLabelsRequest -from google.container_v1.types.cluster_service import SetLegacyAbacRequest -from google.container_v1.types.cluster_service import SetLocationsRequest -from google.container_v1.types.cluster_service import SetLoggingServiceRequest -from google.container_v1.types.cluster_service import SetMaintenancePolicyRequest -from google.container_v1.types.cluster_service import SetMasterAuthRequest -from google.container_v1.types.cluster_service import SetMonitoringServiceRequest -from google.container_v1.types.cluster_service import SetNetworkPolicyRequest -from google.container_v1.types.cluster_service import SetNodePoolAutoscalingRequest -from google.container_v1.types.cluster_service import SetNodePoolManagementRequest -from google.container_v1.types.cluster_service import SetNodePoolSizeRequest -from google.container_v1.types.cluster_service import ShieldedInstanceConfig -from google.container_v1.types.cluster_service import ShieldedNodes -from google.container_v1.types.cluster_service import StartIPRotationRequest -from google.container_v1.types.cluster_service import StatusCondition -from google.container_v1.types.cluster_service import TimeWindow -from google.container_v1.types.cluster_service import UpdateClusterRequest -from google.container_v1.types.cluster_service import UpdateMasterRequest -from google.container_v1.types.cluster_service import UpdateNodePoolRequest -from google.container_v1.types.cluster_service import UsableSubnetwork -from google.container_v1.types.cluster_service import UsableSubnetworkSecondaryRange -from google.container_v1.types.cluster_service import VerticalPodAutoscaling -from google.container_v1.types.cluster_service import WorkloadIdentityConfig -from google.container_v1.types.cluster_service import WorkloadMetadataConfig - -__all__ = ('ClusterManagerClient', - 'ClusterManagerAsyncClient', - 'AcceleratorConfig', - 'AddonsConfig', - 'AuthenticatorGroupsConfig', - 'AutoprovisioningNodePoolDefaults', - 'AutoUpgradeOptions', - 'BinaryAuthorization', - 'CancelOperationRequest', - 'ClientCertificateConfig', - 'CloudRunConfig', - 'Cluster', - 'ClusterAutoscaling', - 'ClusterUpdate', - 'CompleteIPRotationRequest', - 'ConfigConnectorConfig', - 'CreateClusterRequest', - 'CreateNodePoolRequest', - 'DailyMaintenanceWindow', - 'DatabaseEncryption', - 'DefaultSnatStatus', - 'DeleteClusterRequest', - 'DeleteNodePoolRequest', - 'DnsCacheConfig', - 'GetClusterRequest', - 'GetJSONWebKeysRequest', - 'GetJSONWebKeysResponse', - 'GetNodePoolRequest', - 'GetOpenIDConfigRequest', - 'GetOpenIDConfigResponse', - 'GetOperationRequest', - 'GetServerConfigRequest', - 'HorizontalPodAutoscaling', - 'HttpLoadBalancing', - 'IntraNodeVisibilityConfig', - 'IPAllocationPolicy', - 'Jwk', - 'KubernetesDashboard', - 'LegacyAbac', - 'ListClustersRequest', - 'ListClustersResponse', - 'ListNodePoolsRequest', - 'ListNodePoolsResponse', - 'ListOperationsRequest', - 'ListOperationsResponse', - 'ListUsableSubnetworksRequest', - 'ListUsableSubnetworksResponse', - 'MaintenancePolicy', - 'MaintenanceWindow', - 'MasterAuth', - 'MasterAuthorizedNetworksConfig', - 'MaxPodsConstraint', - 'NetworkConfig', - 'NetworkPolicy', - 'NetworkPolicyConfig', - 'NodeConfig', - 'NodeManagement', - 'NodePool', - 'NodePoolAutoscaling', - 'NodeTaint', - 'Operation', - 'OperationProgress', - 'PrivateClusterConfig', - 'PrivateClusterMasterGlobalAccessConfig', - 'RecurringTimeWindow', - 'ReleaseChannel', - 'ReservationAffinity', - 
'ResourceLimit', - 'ResourceUsageExportConfig', - 'RollbackNodePoolUpgradeRequest', - 'SandboxConfig', - 'ServerConfig', - 'SetAddonsConfigRequest', - 'SetLabelsRequest', - 'SetLegacyAbacRequest', - 'SetLocationsRequest', - 'SetLoggingServiceRequest', - 'SetMaintenancePolicyRequest', - 'SetMasterAuthRequest', - 'SetMonitoringServiceRequest', - 'SetNetworkPolicyRequest', - 'SetNodePoolAutoscalingRequest', - 'SetNodePoolManagementRequest', - 'SetNodePoolSizeRequest', - 'ShieldedInstanceConfig', - 'ShieldedNodes', - 'StartIPRotationRequest', - 'StatusCondition', - 'TimeWindow', - 'UpdateClusterRequest', - 'UpdateMasterRequest', - 'UpdateNodePoolRequest', - 'UsableSubnetwork', - 'UsableSubnetworkSecondaryRange', - 'VerticalPodAutoscaling', - 'WorkloadIdentityConfig', - 'WorkloadMetadataConfig', -) diff --git a/owl-bot-staging/v1/google/container/py.typed b/owl-bot-staging/v1/google/container/py.typed deleted file mode 100644 index fd835114..00000000 --- a/owl-bot-staging/v1/google/container/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-container package uses inline types. diff --git a/owl-bot-staging/v1/google/container_v1/__init__.py b/owl-bot-staging/v1/google/container_v1/__init__.py deleted file mode 100644 index e9efa5ea..00000000 --- a/owl-bot-staging/v1/google/container_v1/__init__.py +++ /dev/null @@ -1,214 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from .services.cluster_manager import ClusterManagerClient -from .services.cluster_manager import ClusterManagerAsyncClient - -from .types.cluster_service import AcceleratorConfig -from .types.cluster_service import AddonsConfig -from .types.cluster_service import AuthenticatorGroupsConfig -from .types.cluster_service import AutoprovisioningNodePoolDefaults -from .types.cluster_service import AutoUpgradeOptions -from .types.cluster_service import BinaryAuthorization -from .types.cluster_service import CancelOperationRequest -from .types.cluster_service import ClientCertificateConfig -from .types.cluster_service import CloudRunConfig -from .types.cluster_service import Cluster -from .types.cluster_service import ClusterAutoscaling -from .types.cluster_service import ClusterUpdate -from .types.cluster_service import CompleteIPRotationRequest -from .types.cluster_service import ConfigConnectorConfig -from .types.cluster_service import CreateClusterRequest -from .types.cluster_service import CreateNodePoolRequest -from .types.cluster_service import DailyMaintenanceWindow -from .types.cluster_service import DatabaseEncryption -from .types.cluster_service import DefaultSnatStatus -from .types.cluster_service import DeleteClusterRequest -from .types.cluster_service import DeleteNodePoolRequest -from .types.cluster_service import DnsCacheConfig -from .types.cluster_service import GetClusterRequest -from .types.cluster_service import GetJSONWebKeysRequest -from .types.cluster_service import GetJSONWebKeysResponse -from .types.cluster_service import GetNodePoolRequest -from .types.cluster_service import GetOpenIDConfigRequest -from .types.cluster_service import GetOpenIDConfigResponse -from .types.cluster_service import GetOperationRequest -from .types.cluster_service import GetServerConfigRequest -from .types.cluster_service import HorizontalPodAutoscaling -from .types.cluster_service import HttpLoadBalancing -from .types.cluster_service import IntraNodeVisibilityConfig -from .types.cluster_service import IPAllocationPolicy -from .types.cluster_service import Jwk -from .types.cluster_service import KubernetesDashboard -from .types.cluster_service import LegacyAbac -from .types.cluster_service import ListClustersRequest -from .types.cluster_service import ListClustersResponse -from .types.cluster_service import ListNodePoolsRequest -from .types.cluster_service import ListNodePoolsResponse -from .types.cluster_service import ListOperationsRequest -from .types.cluster_service import ListOperationsResponse -from .types.cluster_service import ListUsableSubnetworksRequest -from .types.cluster_service import ListUsableSubnetworksResponse -from .types.cluster_service import MaintenancePolicy -from .types.cluster_service import MaintenanceWindow -from .types.cluster_service import MasterAuth -from .types.cluster_service import MasterAuthorizedNetworksConfig -from .types.cluster_service import MaxPodsConstraint -from .types.cluster_service import NetworkConfig -from .types.cluster_service import NetworkPolicy -from .types.cluster_service import NetworkPolicyConfig -from .types.cluster_service import NodeConfig -from .types.cluster_service import NodeManagement -from .types.cluster_service import NodePool -from .types.cluster_service import NodePoolAutoscaling -from .types.cluster_service import NodeTaint -from .types.cluster_service import Operation -from .types.cluster_service import OperationProgress -from .types.cluster_service import PrivateClusterConfig -from .types.cluster_service import 
PrivateClusterMasterGlobalAccessConfig -from .types.cluster_service import RecurringTimeWindow -from .types.cluster_service import ReleaseChannel -from .types.cluster_service import ReservationAffinity -from .types.cluster_service import ResourceLimit -from .types.cluster_service import ResourceUsageExportConfig -from .types.cluster_service import RollbackNodePoolUpgradeRequest -from .types.cluster_service import SandboxConfig -from .types.cluster_service import ServerConfig -from .types.cluster_service import SetAddonsConfigRequest -from .types.cluster_service import SetLabelsRequest -from .types.cluster_service import SetLegacyAbacRequest -from .types.cluster_service import SetLocationsRequest -from .types.cluster_service import SetLoggingServiceRequest -from .types.cluster_service import SetMaintenancePolicyRequest -from .types.cluster_service import SetMasterAuthRequest -from .types.cluster_service import SetMonitoringServiceRequest -from .types.cluster_service import SetNetworkPolicyRequest -from .types.cluster_service import SetNodePoolAutoscalingRequest -from .types.cluster_service import SetNodePoolManagementRequest -from .types.cluster_service import SetNodePoolSizeRequest -from .types.cluster_service import ShieldedInstanceConfig -from .types.cluster_service import ShieldedNodes -from .types.cluster_service import StartIPRotationRequest -from .types.cluster_service import StatusCondition -from .types.cluster_service import TimeWindow -from .types.cluster_service import UpdateClusterRequest -from .types.cluster_service import UpdateMasterRequest -from .types.cluster_service import UpdateNodePoolRequest -from .types.cluster_service import UsableSubnetwork -from .types.cluster_service import UsableSubnetworkSecondaryRange -from .types.cluster_service import VerticalPodAutoscaling -from .types.cluster_service import WorkloadIdentityConfig -from .types.cluster_service import WorkloadMetadataConfig - -__all__ = ( - 'ClusterManagerAsyncClient', -'AcceleratorConfig', -'AddonsConfig', -'AuthenticatorGroupsConfig', -'AutoUpgradeOptions', -'AutoprovisioningNodePoolDefaults', -'BinaryAuthorization', -'CancelOperationRequest', -'ClientCertificateConfig', -'CloudRunConfig', -'Cluster', -'ClusterAutoscaling', -'ClusterManagerClient', -'ClusterUpdate', -'CompleteIPRotationRequest', -'ConfigConnectorConfig', -'CreateClusterRequest', -'CreateNodePoolRequest', -'DailyMaintenanceWindow', -'DatabaseEncryption', -'DefaultSnatStatus', -'DeleteClusterRequest', -'DeleteNodePoolRequest', -'DnsCacheConfig', -'GetClusterRequest', -'GetJSONWebKeysRequest', -'GetJSONWebKeysResponse', -'GetNodePoolRequest', -'GetOpenIDConfigRequest', -'GetOpenIDConfigResponse', -'GetOperationRequest', -'GetServerConfigRequest', -'HorizontalPodAutoscaling', -'HttpLoadBalancing', -'IPAllocationPolicy', -'IntraNodeVisibilityConfig', -'Jwk', -'KubernetesDashboard', -'LegacyAbac', -'ListClustersRequest', -'ListClustersResponse', -'ListNodePoolsRequest', -'ListNodePoolsResponse', -'ListOperationsRequest', -'ListOperationsResponse', -'ListUsableSubnetworksRequest', -'ListUsableSubnetworksResponse', -'MaintenancePolicy', -'MaintenanceWindow', -'MasterAuth', -'MasterAuthorizedNetworksConfig', -'MaxPodsConstraint', -'NetworkConfig', -'NetworkPolicy', -'NetworkPolicyConfig', -'NodeConfig', -'NodeManagement', -'NodePool', -'NodePoolAutoscaling', -'NodeTaint', -'Operation', -'OperationProgress', -'PrivateClusterConfig', -'PrivateClusterMasterGlobalAccessConfig', -'RecurringTimeWindow', -'ReleaseChannel', -'ReservationAffinity', 
-'ResourceLimit', -'ResourceUsageExportConfig', -'RollbackNodePoolUpgradeRequest', -'SandboxConfig', -'ServerConfig', -'SetAddonsConfigRequest', -'SetLabelsRequest', -'SetLegacyAbacRequest', -'SetLocationsRequest', -'SetLoggingServiceRequest', -'SetMaintenancePolicyRequest', -'SetMasterAuthRequest', -'SetMonitoringServiceRequest', -'SetNetworkPolicyRequest', -'SetNodePoolAutoscalingRequest', -'SetNodePoolManagementRequest', -'SetNodePoolSizeRequest', -'ShieldedInstanceConfig', -'ShieldedNodes', -'StartIPRotationRequest', -'StatusCondition', -'TimeWindow', -'UpdateClusterRequest', -'UpdateMasterRequest', -'UpdateNodePoolRequest', -'UsableSubnetwork', -'UsableSubnetworkSecondaryRange', -'VerticalPodAutoscaling', -'WorkloadIdentityConfig', -'WorkloadMetadataConfig', -) diff --git a/owl-bot-staging/v1/google/container_v1/gapic_metadata.json b/owl-bot-staging/v1/google/container_v1/gapic_metadata.json deleted file mode 100644 index 1638f865..00000000 --- a/owl-bot-staging/v1/google/container_v1/gapic_metadata.json +++ /dev/null @@ -1,343 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.container_v1", - "protoPackage": "google.container.v1", - "schema": "1.0", - "services": { - "ClusterManager": { - "clients": { - "grpc": { - "libraryClient": "ClusterManagerClient", - "rpcs": { - "CancelOperation": { - "methods": [ - "cancel_operation" - ] - }, - "CompleteIPRotation": { - "methods": [ - "complete_ip_rotation" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateNodePool": { - "methods": [ - "create_node_pool" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteNodePool": { - "methods": [ - "delete_node_pool" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetJSONWebKeys": { - "methods": [ - "get_json_web_keys" - ] - }, - "GetNodePool": { - "methods": [ - "get_node_pool" - ] - }, - "GetOperation": { - "methods": [ - "get_operation" - ] - }, - "GetServerConfig": { - "methods": [ - "get_server_config" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListNodePools": { - "methods": [ - "list_node_pools" - ] - }, - "ListOperations": { - "methods": [ - "list_operations" - ] - }, - "ListUsableSubnetworks": { - "methods": [ - "list_usable_subnetworks" - ] - }, - "RollbackNodePoolUpgrade": { - "methods": [ - "rollback_node_pool_upgrade" - ] - }, - "SetAddonsConfig": { - "methods": [ - "set_addons_config" - ] - }, - "SetLabels": { - "methods": [ - "set_labels" - ] - }, - "SetLegacyAbac": { - "methods": [ - "set_legacy_abac" - ] - }, - "SetLocations": { - "methods": [ - "set_locations" - ] - }, - "SetLoggingService": { - "methods": [ - "set_logging_service" - ] - }, - "SetMaintenancePolicy": { - "methods": [ - "set_maintenance_policy" - ] - }, - "SetMasterAuth": { - "methods": [ - "set_master_auth" - ] - }, - "SetMonitoringService": { - "methods": [ - "set_monitoring_service" - ] - }, - "SetNetworkPolicy": { - "methods": [ - "set_network_policy" - ] - }, - "SetNodePoolAutoscaling": { - "methods": [ - "set_node_pool_autoscaling" - ] - }, - "SetNodePoolManagement": { - "methods": [ - "set_node_pool_management" - ] - }, - "SetNodePoolSize": { - "methods": [ - "set_node_pool_size" - ] - }, - "StartIPRotation": { - "methods": [ - "start_ip_rotation" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateMaster": { - "methods": [ - "update_master" - ] - }, - 
"UpdateNodePool": { - "methods": [ - "update_node_pool" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ClusterManagerAsyncClient", - "rpcs": { - "CancelOperation": { - "methods": [ - "cancel_operation" - ] - }, - "CompleteIPRotation": { - "methods": [ - "complete_ip_rotation" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateNodePool": { - "methods": [ - "create_node_pool" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteNodePool": { - "methods": [ - "delete_node_pool" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetJSONWebKeys": { - "methods": [ - "get_json_web_keys" - ] - }, - "GetNodePool": { - "methods": [ - "get_node_pool" - ] - }, - "GetOperation": { - "methods": [ - "get_operation" - ] - }, - "GetServerConfig": { - "methods": [ - "get_server_config" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListNodePools": { - "methods": [ - "list_node_pools" - ] - }, - "ListOperations": { - "methods": [ - "list_operations" - ] - }, - "ListUsableSubnetworks": { - "methods": [ - "list_usable_subnetworks" - ] - }, - "RollbackNodePoolUpgrade": { - "methods": [ - "rollback_node_pool_upgrade" - ] - }, - "SetAddonsConfig": { - "methods": [ - "set_addons_config" - ] - }, - "SetLabels": { - "methods": [ - "set_labels" - ] - }, - "SetLegacyAbac": { - "methods": [ - "set_legacy_abac" - ] - }, - "SetLocations": { - "methods": [ - "set_locations" - ] - }, - "SetLoggingService": { - "methods": [ - "set_logging_service" - ] - }, - "SetMaintenancePolicy": { - "methods": [ - "set_maintenance_policy" - ] - }, - "SetMasterAuth": { - "methods": [ - "set_master_auth" - ] - }, - "SetMonitoringService": { - "methods": [ - "set_monitoring_service" - ] - }, - "SetNetworkPolicy": { - "methods": [ - "set_network_policy" - ] - }, - "SetNodePoolAutoscaling": { - "methods": [ - "set_node_pool_autoscaling" - ] - }, - "SetNodePoolManagement": { - "methods": [ - "set_node_pool_management" - ] - }, - "SetNodePoolSize": { - "methods": [ - "set_node_pool_size" - ] - }, - "StartIPRotation": { - "methods": [ - "start_ip_rotation" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateMaster": { - "methods": [ - "update_master" - ] - }, - "UpdateNodePool": { - "methods": [ - "update_node_pool" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1/google/container_v1/py.typed b/owl-bot-staging/v1/google/container_v1/py.typed deleted file mode 100644 index fd835114..00000000 --- a/owl-bot-staging/v1/google/container_v1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-container package uses inline types. diff --git a/owl-bot-staging/v1/google/container_v1/services/__init__.py b/owl-bot-staging/v1/google/container_v1/services/__init__.py deleted file mode 100644 index 4de65971..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py deleted file mode 100644 index 490efad3..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import ClusterManagerClient -from .async_client import ClusterManagerAsyncClient - -__all__ = ( - 'ClusterManagerClient', - 'ClusterManagerAsyncClient', -) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py deleted file mode 100644 index b53572ad..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/async_client.py +++ /dev/null @@ -1,3604 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
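The cluster_manager package __init__ deleted above re-exports both client classes via its __all__. A small sketch of the import path it enables; this runs without credentials, since nothing below contacts the API:

    # Both classes come from the __all__ shown above.
    from google.container_v1.services.cluster_manager import (
        ClusterManagerAsyncClient,
        ClusterManagerClient,
    )

    # Only inspects a class attribute; no network call is made.
    print(ClusterManagerClient.DEFAULT_ENDPOINT)  # container.googleapis.com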
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources -import warnings - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.container_v1.services.cluster_manager import pagers -from google.container_v1.types import cluster_service -from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport -from .client import ClusterManagerClient - - -class ClusterManagerAsyncClient: - """Google Kubernetes Engine Cluster Manager v1""" - - _client: ClusterManagerClient - - DEFAULT_ENDPOINT = ClusterManagerClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ClusterManagerClient.DEFAULT_MTLS_ENDPOINT - - common_billing_account_path = staticmethod(ClusterManagerClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ClusterManagerClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ClusterManagerClient.common_folder_path) - parse_common_folder_path = staticmethod(ClusterManagerClient.parse_common_folder_path) - common_organization_path = staticmethod(ClusterManagerClient.common_organization_path) - parse_common_organization_path = staticmethod(ClusterManagerClient.parse_common_organization_path) - common_project_path = staticmethod(ClusterManagerClient.common_project_path) - parse_common_project_path = staticmethod(ClusterManagerClient.parse_common_project_path) - common_location_path = staticmethod(ClusterManagerClient.common_location_path) - parse_common_location_path = staticmethod(ClusterManagerClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerAsyncClient: The constructed client. - """ - return ClusterManagerClient.from_service_account_info.__func__(ClusterManagerAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerAsyncClient: The constructed client. - """ - return ClusterManagerClient.from_service_account_file.__func__(ClusterManagerAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterManagerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterManagerTransport: The transport used by the client instance. 
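The from_service_account_* helpers above forward to the sync client's implementation and return the async client type. A minimal sketch, assuming a service-account key file exists at the hypothetical path below:

    from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient

    # "/path/to/key.json" is a hypothetical service-account key file.
    client = ClusterManagerAsyncClient.from_service_account_file("/path/to/key.json")

    # from_service_account_json is an alias of from_service_account_file,
    # per the assignment above.
    also_client = ClusterManagerAsyncClient.from_service_account_json("/path/to/key.json")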
- """ - return self._client.transport - - get_transport_class = functools.partial(type(ClusterManagerClient).get_transport_class, type(ClusterManagerClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ClusterManagerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster manager client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.ClusterManagerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ClusterManagerClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def list_clusters(self, - request: cluster_service.ListClustersRequest = None, - *, - project_id: str = None, - zone: str = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListClustersResponse: - r"""Lists all clusters owned by a project in either the - specified zone or all zones. - - Args: - request (:class:`google.container_v1.types.ListClustersRequest`): - The request object. ListClustersRequest lists clusters. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides, or "-" for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (:class:`str`): - The parent (project and location) where the clusters - will be listed. Specified in the format - ``projects/*/locations/*``. 
Location "-" matches all - zones and all regions. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ListClustersResponse: - ListClustersResponse is the result of - ListClustersRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListClustersRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_cluster(self, - request: cluster_service.GetClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Cluster: - r"""Gets the details of a specific cluster. - - Args: - request (:class:`google.container_v1.types.GetClusterRequest`): - The request object. GetClusterRequest gets the settings - of a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to retrieve. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- name (:class:`str`): - The name (project, location, cluster) of the cluster to - retrieve. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Cluster: - A Google Kubernetes Engine cluster. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_cluster(self, - request: cluster_service.CreateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster: cluster_service.Cluster = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Args: - request (:class:`google.container_v1.types.CreateClusterRequest`): - The request object. CreateClusterRequest creates a - cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. 
- - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.container_v1.types.Cluster`): - Required. A `cluster - resource `__ - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (:class:`str`): - The parent (project and location) where the cluster will - be created. Specified in the format - ``projects/*/locations/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster, parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster is not None: - request.cluster = cluster - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_cluster(self, - request: cluster_service.UpdateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - update: cluster_service.ClusterUpdate = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the settings of a specific cluster. - - Args: - request (:class:`google.container_v1.types.UpdateClusterRequest`): - The request object. UpdateClusterRequest updates the - settings of a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. 
- This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update (:class:`google.container_v1.types.ClusterUpdate`): - Required. A description of the - update. - - This corresponds to the ``update`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, update, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.UpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if update is not None: - request.update = update - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_node_pool(self, - request: cluster_service.UpdateNodePoolRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the version and/or image type for the - specified node pool. 
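As the docstrings above note, the flattened parameters (project_id/zone or parent/name) are mutually exclusive with passing a full request object; supplying both raises ValueError. A minimal end-to-end sketch of list_clusters and get_cluster using the name-based forms, assuming ambient application-default credentials and a hypothetical project:

    import asyncio

    from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient

    async def main() -> None:
        client = ClusterManagerAsyncClient()
        # Location "-" matches all zones and regions, per list_clusters above.
        response = await client.list_clusters(parent="projects/my-project/locations/-")
        for summary in response.clusters:
            cluster = await client.get_cluster(
                name=f"projects/my-project/locations/{summary.location}/clusters/{summary.name}",
            )
            print(cluster.name, cluster.status)

    asyncio.run(main())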
- - Args: - request (:class:`google.container_v1.types.UpdateNodePoolRequest`): - The request object. UpdateNodePoolRequests update a node - pool's image and/or version. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.UpdateNodePoolRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_node_pool, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_node_pool_autoscaling(self, - request: cluster_service.SetNodePoolAutoscalingRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the autoscaling settings for the specified node - pool. - - Args: - request (:class:`google.container_v1.types.SetNodePoolAutoscalingRequest`): - The request object. SetNodePoolAutoscalingRequest sets - the autoscaler settings of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolAutoscalingRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_node_pool_autoscaling, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_logging_service(self, - request: cluster_service.SetLoggingServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - logging_service: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the logging service for a specific cluster. 
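Unlike the flattened methods above, update_node_pool and set_node_pool_autoscaling accept only a request object. A sketch of the request-object form; the resource name and image type are hypothetical, and the "-" version alias is an assumption carried over from the master_version aliases documented later in this file:

    from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient
    from google.container_v1.types import cluster_service

    async def upgrade_node_pool(client: ClusterManagerAsyncClient) -> None:
        request = cluster_service.UpdateNodePoolRequest(
            name="projects/my-project/locations/us-central1/clusters/my-cluster/nodePools/default-pool",
            node_version="-",             # assumed: "-" selects the default version
            image_type="COS_CONTAINERD",  # hypothetical image type
        )
        operation = await client.update_node_pool(request=request)
        print(operation.status)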
- - Args: - request (:class:`google.container_v1.types.SetLoggingServiceRequest`): - The request object. SetLoggingServiceRequest sets the - logging service of a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logging_service (:class:`str`): - Required. The logging service the cluster should use to - write logs. Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud - Logging service with a Kubernetes-native resource - model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``logging.googleapis.com`` for - earlier versions. - - This corresponds to the ``logging_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - set logging. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, logging_service, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLoggingServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if logging_service is not None: - request.logging_service = logging_service - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
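The wrapper mentioned in the comment above applies the generated retry defaults (visible at list_clusters and delete_cluster: backoff from 0.1s to 60s at multiplier 1.3, retrying DeadlineExceeded and ServiceUnavailable). Callers can override these per call; a sketch using the sync client and a hypothetical cluster name:

    from google.api_core import exceptions as core_exceptions
    from google.api_core import retry as retries
    from google.container_v1.services.cluster_manager import ClusterManagerClient

    # Mirrors the generated defaults above, but gives up after 30 seconds.
    custom_retry = retries.Retry(
        initial=0.1,
        maximum=60.0,
        multiplier=1.3,
        predicate=retries.if_exception_type(
            core_exceptions.DeadlineExceeded,
            core_exceptions.ServiceUnavailable,
        ),
        deadline=30.0,
    )

    client = ClusterManagerClient()  # assumes ambient credentials
    cluster = client.get_cluster(
        name="projects/my-project/locations/us-central1/clusters/my-cluster",
        retry=custom_retry,
        timeout=30.0,
    )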
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_logging_service, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_monitoring_service(self, - request: cluster_service.SetMonitoringServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - monitoring_service: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the monitoring service for a specific cluster. - - Args: - request (:class:`google.container_v1.types.SetMonitoringServiceRequest`): - The request object. SetMonitoringServiceRequest sets the - monitoring service of a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - monitoring_service (:class:`str`): - Required. The monitoring service the cluster should use - to write metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE - 1.15). - - ``none`` - No metrics will be exported from the - cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will - be used for GKE 1.14+ or ``monitoring.googleapis.com`` - for earlier versions. - - This corresponds to the ``monitoring_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - set monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. 
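A sketch of set_logging_service using one of the options documented above; the cluster name is hypothetical:

    from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient

    async def enable_native_logging(client: ClusterManagerAsyncClient) -> None:
        # Routes logs through the Kubernetes-native Cloud Logging resource
        # model, per the options listed in the docstring above.
        operation = await client.set_logging_service(
            name="projects/my-project/locations/us-central1/clusters/my-cluster",
            logging_service="logging.googleapis.com/kubernetes",
        )
        print(operation.status)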
- - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, monitoring_service, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetMonitoringServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if monitoring_service is not None: - request.monitoring_service = monitoring_service - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_monitoring_service, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_addons_config(self, - request: cluster_service.SetAddonsConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - addons_config: cluster_service.AddonsConfig = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the addons for a specific cluster. - - Args: - request (:class:`google.container_v1.types.SetAddonsConfigRequest`): - The request object. SetAddonsConfigRequest sets the - addons associated with the cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - addons_config (:class:`google.container_v1.types.AddonsConfig`): - Required. The desired configurations - for the various addons available to run - in the cluster. - - This corresponds to the ``addons_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - set addons. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, addons_config, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetAddonsConfigRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if addons_config is not None: - request.addons_config = addons_config - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_addons_config, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_locations(self, - request: cluster_service.SetLocationsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - locations: Sequence[str] = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the locations for a specific cluster. Deprecated. Use - `projects.locations.clusters.update `__ - instead. - - Args: - request (:class:`google.container_v1.types.SetLocationsRequest`): - The request object. SetLocationsRequest sets the - locations of the cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. 
- - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - locations (:class:`Sequence[str]`): - Required. The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. Changing - the locations a cluster is in will result in nodes being - either created or removed from the cluster, depending on - whether locations are being added or removed. - - This list must always include the cluster's primary - zone. - - This corresponds to the ``locations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - set locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - warnings.warn("ClusterManagerAsyncClient.set_locations is deprecated", - DeprecationWarning) - - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, locations, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLocationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - if locations: - request.locations.extend(locations) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_locations, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_master(self, - request: cluster_service.UpdateMasterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - master_version: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the master for a specific cluster. - - Args: - request (:class:`google.container_v1.types.UpdateMasterRequest`): - The request object. UpdateMasterRequest updates the - master of the cluster. 
- project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - master_version (:class:`str`): - Required. The Kubernetes version to - change the master to. - Users may specify either explicit - versions offered by Kubernetes Engine or - version aliases, which have the - following behavior: - - "latest": picks the highest valid - Kubernetes version - "1.X": picks the - highest valid patch+gke.N patch in the - 1.X version - "1.X.Y": picks the highest - valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the - default Kubernetes version - - This corresponds to the ``master_version`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, master_version, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.UpdateMasterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if master_version is not None: - request.master_version = master_version - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_master, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
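set_locations above is deprecated in favor of projects.locations.clusters.update, and update_master documents the version aliases ("latest", "1.X", "1.X.Y", "1.X.Y-gke.N", "-"). A sketch of both; the resource names are hypothetical, and desired_locations is an assumed ClusterUpdate field chosen as the update_cluster counterpart of set_locations:

    from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient
    from google.container_v1.types import cluster_service

    CLUSTER = "projects/my-project/locations/us-central1/clusters/my-cluster"

    async def move_node_locations(client: ClusterManagerAsyncClient) -> None:
        # Preferred over the deprecated set_locations (assumed mapping).
        update = cluster_service.ClusterUpdate(
            desired_locations=["us-central1-a", "us-central1-b"],
        )
        await client.update_cluster(update=update, name=CLUSTER)

    async def upgrade_master(client: ClusterManagerAsyncClient) -> None:
        # "-" picks the default Kubernetes version, per the aliases above.
        await client.update_master(master_version="-", name=CLUSTER)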
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_master_auth(self, - request: cluster_service.SetMasterAuthRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets master auth materials. Currently supports - changing the admin password or a specific cluster, - either via password generation or explicitly setting the - password. - - Args: - request (:class:`google.container_v1.types.SetMasterAuthRequest`): - The request object. SetMasterAuthRequest updates the - admin password of a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetMasterAuthRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_master_auth, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_cluster(self, - request: cluster_service.DeleteClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted. - - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Args: - request (:class:`google.container_v1.types.DeleteClusterRequest`): - The request object. DeleteClusterRequest deletes a - cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- cluster_id (:class:`str`): - Deprecated. The name of the cluster - to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster) of the cluster to - delete. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.DeleteClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_operations(self, - request: cluster_service.ListOperationsRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListOperationsResponse: - r"""Lists all operations in a project in a specific zone - or all zones. - - Args: - request (:class:`google.container_v1.types.ListOperationsRequest`): - The request object. ListOperationsRequest lists - operations. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. 
The name of the Google Compute Engine - `zone `__ - to return operations for, or ``-`` for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ListOperationsResponse: - ListOperationsResponse is the result - of ListOperationsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListOperationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_operations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_operation(self, - request: cluster_service.GetOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Gets the specified operation. - - Args: - request (:class:`google.container_v1.types.GetOperationRequest`): - The request object. GetOperationRequest gets a single - operation. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (:class:`str`): - Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced - by the name field. 
- - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, operation id) of the - operation to get. Specified in the format - ``projects/*/locations/*/operations/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetOperationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def cancel_operation(self, - request: cluster_service.CancelOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels the specified operation. - - Args: - request (:class:`google.container_v1.types.CancelOperationRequest`): - The request object. CancelOperationRequest cancels a - single operation. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the operation resides. This field has been - deprecated and replaced by the name field. 
- - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (:class:`str`): - Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced - by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, operation id) of the - operation to cancel. Specified in the format - ``projects/*/locations/*/operations/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CancelOperationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_operation, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def get_server_config(self, - request: cluster_service.GetServerConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ServerConfig: - r"""Returns configuration info about the Google - Kubernetes Engine service. - - Args: - request (:class:`google.container_v1.types.GetServerConfigRequest`): - The request object. Gets the current Kubernetes Engine - service configuration. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - to return operations for. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- name (:class:`str`): - The name (project and location) of the server config to - get, specified in the format ``projects/*/locations/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ServerConfig: - Kubernetes Engine service - configuration. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetServerConfigRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_server_config, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_json_web_keys(self, - request: cluster_service.GetJSONWebKeysRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.GetJSONWebKeysResponse: - r"""Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Args: - request (:class:`google.container_v1.types.GetJSONWebKeysRequest`): - The request object. GetJSONWebKeysRequest gets the - public component of the keys used by the cluster to sign - token requests. This will be the jwks_uri for the - discovery document returned by getOpenIDConfig. See the - OpenID Connect Discovery 1.0 specification for details. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.GetJSONWebKeysResponse: - GetJSONWebKeysResponse is a valid - JSON Web Key Set as specified in RFC - 7517. - - """ - # Create or coerce a protobuf request object.
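#     The proto-plus constructor on the next line accepts None, an existing
#     GetJSONWebKeysRequest, or a mapping of field names, so both of the
#     following are equivalent (a sketch; the resource name is illustrative):
#
#         cluster_service.GetJSONWebKeysRequest(
#             parent="projects/my-project/locations/us-central1/clusters/my-cluster")
#         cluster_service.GetJSONWebKeysRequest(
#             {"parent": "projects/my-project/locations/us-central1/clusters/my-cluster"})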
- request = cluster_service.GetJSONWebKeysRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_json_web_keys, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_node_pools(self, - request: cluster_service.ListNodePoolsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListNodePoolsResponse: - r"""Lists the node pools for a cluster. - - Args: - request (:class:`google.container_v1.types.ListNodePoolsRequest`): - The request object. ListNodePoolsRequest lists the node - pool(s) for a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (:class:`str`): - The parent (project, location, cluster id) where the - node pools will be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ListNodePoolsResponse: - ListNodePoolsResponse is the result - of ListNodePoolsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListNodePoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
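#     Concretely, the two supported calling styles look like this (a sketch;
#     `client` is assumed to be an initialized ClusterManagerAsyncClient):
#
#         # Either pass a fully-populated request object with no keyword fields...
#         response = await client.list_node_pools(
#             request=cluster_service.ListNodePoolsRequest(
#                 parent="projects/my-project/locations/us-central1/clusters/my-cluster"))
#
#         # ...or pass flattened keyword fields only; the request is then built
#         # here. Mixing both styles raises the ValueError above.
#         response = await client.list_node_pools(
#             parent="projects/my-project/locations/us-central1/clusters/my-cluster")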
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_node_pools, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_node_pool(self, - request: cluster_service.GetNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.NodePool: - r"""Retrieves the requested node pool. - - Args: - request (:class:`google.container_v1.types.GetNodePoolRequest`): - The request object. GetNodePoolRequest retrieves a node - pool for a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Deprecated. The name of the node - pool. This field has been deprecated and - replaced by the name field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster, node pool id) of - the node pool to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.NodePool: - NodePool contains the name and - configuration for a cluster's node pool. - Node pools are a set of nodes (i.e. 
- VMs), with a common configuration and - specification, under the control of the - cluster master. They may have a set of - Kubernetes labels applied to them, which - may be used to reference them during pod - scheduling. They may also be resized up - or down, to accommodate the workload. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_node_pool, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_node_pool(self, - request: cluster_service.CreateNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool: cluster_service.NodePool = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a node pool for a cluster. - - Args: - request (:class:`google.container_v1.types.CreateNodePoolRequest`): - The request object. CreateNodePoolRequest creates a node - pool for a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool (:class:`google.container_v1.types.NodePool`): - Required.
The node pool to create. - This corresponds to the ``node_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (:class:`str`): - The parent (project, location, cluster id) where the - node pool will be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CreateNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool is not None: - request.node_pool = node_pool - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_node_pool, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_node_pool(self, - request: cluster_service.DeleteNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes a node pool from a cluster. - - Args: - request (:class:`google.container_v1.types.DeleteNodePoolRequest`): - The request object. DeleteNodePoolRequest deletes a node - pool for a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. 
- - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Deprecated. The name of the node pool - to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster, node pool id) of - the node pool to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.DeleteNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_node_pool, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
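#     The returned long-running Operation can then be polled with
#     get_operation (a sketch; the project and location are illustrative,
#     and `response.name` carries the server-assigned operation ID):
#
#         op = await client.get_operation(
#             name=f"projects/my-project/locations/us-central1/operations/{response.name}")
#         # Operation.Status values include PENDING, RUNNING, and DONE.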
- return response - - async def rollback_node_pool_upgrade(self, - request: cluster_service.RollbackNodePoolUpgradeRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Args: - request (:class:`google.container_v1.types.RollbackNodePoolUpgradeRequest`): - The request object. RollbackNodePoolUpgradeRequest - rolls back the previously Aborted or Failed NodePool - upgrade. This will be a no-op if the last upgrade - successfully completed. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to roll back. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Deprecated. The name of the node pool - to roll back. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster, node pool id) of - the node pool whose upgrade will be rolled back. - Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.RollbackNodePoolUpgradeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these.
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.rollback_node_pool_upgrade, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_node_pool_management(self, - request: cluster_service.SetNodePoolManagementRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the NodeManagement options for a node pool. - - Args: - request (:class:`google.container_v1.types.SetNodePoolManagementRequest`): - The request object. SetNodePoolManagementRequest sets - the node management properties of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolManagementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_node_pool_management, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_labels(self, - request: cluster_service.SetLabelsRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets labels on a cluster. - - Args: - request (:class:`google.container_v1.types.SetLabelsRequest`): - The request object. SetLabelsRequest sets the Google - Cloud Platform labels on a Google Container Engine - cluster, which will in turn set them for Google Compute - Engine resources used by that cluster - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
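Example (a sketch; the label values are illustrative, and ``label_fingerprint`` must echo the fingerprint returned with the current cluster)::

    request = cluster_service.SetLabelsRequest(
        name="projects/my-project/locations/us-central1/clusters/my-cluster",
        resource_labels={"env": "prod"},
        label_fingerprint="42",
    )
    operation = await client.set_labels(request=request)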
- - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetLabelsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_labels, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_legacy_abac(self, - request: cluster_service.SetLegacyAbacRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - enabled: bool = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables the ABAC authorization mechanism - on a cluster. - - Args: - request (:class:`google.container_v1.types.SetLegacyAbacRequest`): - The request object. SetLegacyAbacRequest enables or - disables the ABAC authorization mechanism for a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster - to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - enabled (:class:`bool`): - Required. Whether ABAC authorization - will be enabled in the cluster. - - This corresponds to the ``enabled`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster id) of the cluster - to set legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, enabled, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLegacyAbacRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if enabled is not None: - request.enabled = enabled - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_legacy_abac, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def start_ip_rotation(self, - request: cluster_service.StartIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Starts master IP rotation. - - Args: - request (:class:`google.container_v1.types.StartIPRotationRequest`): - The request object. StartIPRotationRequest creates a new - IP for the cluster and then performs a node upgrade on - each node pool to point to the new IP. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster id) of the cluster - to start IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.StartIPRotationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.start_ip_rotation, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def complete_ip_rotation(self, - request: cluster_service.CompleteIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Completes master IP rotation. - - Args: - request (:class:`google.container_v1.types.CompleteIPRotationRequest`): - The request object. CompleteIPRotationRequest moves the - cluster master back into single-IP mode. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster id) of the cluster - to complete IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CompleteIPRotationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.complete_ip_rotation, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_node_pool_size(self, - request: cluster_service.SetNodePoolSizeRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the size for a specific node pool. - - Args: - request (:class:`google.container_v1.types.SetNodePoolSizeRequest`): - The request object. SetNodePoolSizeRequest sets the size - of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolSizeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_node_pool_size, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response.
- return response - - async def set_network_policy(self, - request: cluster_service.SetNetworkPolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - network_policy: cluster_service.NetworkPolicy = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables Network Policy for a cluster. - - Args: - request (:class:`google.container_v1.types.SetNetworkPolicyRequest`): - The request object. SetNetworkPolicyRequest - enables/disables network policy for a cluster. - project_id (:class:`str`): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - network_policy (:class:`google.container_v1.types.NetworkPolicy`): - Required. Configuration options for - the NetworkPolicy feature. - - This corresponds to the ``network_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster id) of the cluster - to set networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, network_policy, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetNetworkPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if network_policy is not None: - request.network_policy = network_policy - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
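#     The defaults attached here can still be overridden on a per-call
#     basis (a sketch; the values are illustrative):
#
#         from google.api_core import retry as retries
#
#         response = await client.set_network_policy(
#             request=request,
#             timeout=120.0,
#             retry=retries.Retry(deadline=120.0))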
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_network_policy, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_maintenance_policy(self, - request: cluster_service.SetMaintenancePolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - maintenance_policy: cluster_service.MaintenancePolicy = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the maintenance policy for a cluster. - - Args: - request (:class:`google.container_v1.types.SetMaintenancePolicyRequest`): - The request object. SetMaintenancePolicyRequest sets the - maintenance policy for a cluster. - project_id (:class:`str`): - Required. The Google Developers Console `project ID or - project - number `__. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. The name of the cluster to - update. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - maintenance_policy (:class:`google.container_v1.types.MaintenancePolicy`): - Required. The maintenance policy to - be set for the cluster. An empty field - clears the existing maintenance policy. - - This corresponds to the ``maintenance_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (:class:`str`): - The name (project, location, cluster id) of the cluster - to set maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
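#     A flattened invocation of this method builds the nested policy
#     message first (a sketch; the daily window start time is illustrative):
#
#         policy = cluster_service.MaintenancePolicy(
#             window=cluster_service.MaintenanceWindow(
#                 daily_maintenance_window=cluster_service.DailyMaintenanceWindow(
#                     start_time="03:00")))
#         operation = await client.set_maintenance_policy(
#             project_id="my-project",
#             zone="us-central1-a",
#             cluster_id="my-cluster",
#             maintenance_policy=policy)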
- has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy, name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetMaintenancePolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if maintenance_policy is not None: - request.maintenance_policy = maintenance_policy - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_maintenance_policy, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_usable_subnetworks(self, - request: cluster_service.ListUsableSubnetworksRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListUsableSubnetworksAsyncPager: - r"""Lists subnetworks that are usable for creating - clusters in a project. - - Args: - request (:class:`google.container_v1.types.ListUsableSubnetworksRequest`): - The request object. ListUsableSubnetworksRequest - requests the list of usable subnetworks available to a - user for creating clusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.services.cluster_manager.pagers.ListUsableSubnetworksAsyncPager: - ListUsableSubnetworksResponse is the - response of - ListUsableSubnetworksRequest. - - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.ListUsableSubnetworksRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_usable_subnetworks, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListUsableSubnetworksAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
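#     The pager is consumed with `async for`, which resolves further pages
#     on demand (a sketch; `client` is assumed to be initialized):
#
#         pager = await client.list_usable_subnetworks(
#             request=cluster_service.ListUsableSubnetworksRequest(
#                 parent="projects/my-project"))
#         async for subnetwork in pager:
#             print(subnetwork.subnetwork)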
-        return response
-
-
-
-
-
-try:
-    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
-        gapic_version=pkg_resources.get_distribution(
-            "google-container",
-        ).version,
-    )
-except pkg_resources.DistributionNotFound:
-    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
-
-
-__all__ = (
-    "ClusterManagerAsyncClient",
-)
diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py
deleted file mode 100644
index 2999b112..00000000
--- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/client.py
+++ /dev/null
@@ -1,3731 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-import warnings
-
-from google.api_core import client_options as client_options_lib  # type: ignore
-from google.api_core import exceptions as core_exceptions  # type: ignore
-from google.api_core import gapic_v1  # type: ignore
-from google.api_core import retry as retries  # type: ignore
-from google.auth import credentials as ga_credentials  # type: ignore
-from google.auth.transport import mtls  # type: ignore
-from google.auth.transport.grpc import SslCredentials  # type: ignore
-from google.auth.exceptions import MutualTLSChannelError  # type: ignore
-from google.oauth2 import service_account  # type: ignore
-
-from google.container_v1.services.cluster_manager import pagers
-from google.container_v1.types import cluster_service
-from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import ClusterManagerGrpcTransport
-from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport
-
-
-class ClusterManagerClientMeta(type):
-    """Metaclass for the ClusterManager client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[ClusterManagerTransport]]
-    _transport_registry["grpc"] = ClusterManagerGrpcTransport
-    _transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[ClusterManagerTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
-    """Google Kubernetes Engine Cluster Manager v1"""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
-        )
-
-        m = mtls_endpoint_re.match(api_endpoint)
-        name, mtls, sandbox, googledomain = m.groups()
-        if mtls or not googledomain:
-            return api_endpoint
-
-        if sandbox:
-            return api_endpoint.replace(
-                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
-            )
-
-        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
-
-    DEFAULT_ENDPOINT = "container.googleapis.com"
-    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
-        DEFAULT_ENDPOINT
-    )
-
-    @classmethod
-    def from_service_account_info(cls, info: dict, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            info.
-
-        Args:
-            info (dict): The service account private key info.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            ClusterManagerClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_info(info)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    @classmethod
-    def from_service_account_file(cls, filename: str, *args, **kwargs):
-        """Creates an instance of this client using the provided credentials
-            file.
-
-        Args:
-            filename (str): The path to the service account private key json
-                file.
-            args: Additional arguments to pass to the constructor.
-            kwargs: Additional arguments to pass to the constructor.
-
-        Returns:
-            ClusterManagerClient: The constructed client.
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> ClusterManagerTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            ClusterManagerTransport: The transport used by the client
-                instance.
- """ - return self._transport - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ClusterManagerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster manager client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ClusterManagerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). 
However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ClusterManagerTransport): - # transport is a ClusterManagerTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), - ) - - def list_clusters(self, - request: cluster_service.ListClustersRequest = None, - *, - project_id: str = None, - zone: str = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListClustersResponse: - r"""Lists all clusters owned by a project in either the - specified zone or all zones. - - Args: - request (google.container_v1.types.ListClustersRequest): - The request object. ListClustersRequest lists clusters. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides, or "-" for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (str): - The parent (project and location) where the clusters - will be listed. Specified in the format - ``projects/*/locations/*``. Location "-" matches all - zones and all regions. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ListClustersResponse: - ListClustersResponse is the result of - ListClustersRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListClustersRequest): - request = cluster_service.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.list_clusters] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_cluster(self, - request: cluster_service.GetClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Cluster: - r"""Gets the details of a specific cluster. - - Args: - request (google.container_v1.types.GetClusterRequest): - The request object. GetClusterRequest gets the settings - of a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to retrieve. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - retrieve. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Cluster: - A Google Kubernetes Engine cluster. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetClusterRequest): - request = cluster_service.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_cluster(self, - request: cluster_service.CreateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster: cluster_service.Cluster = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Args: - request (google.container_v1.types.CreateClusterRequest): - The request object. CreateClusterRequest creates a - cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.container_v1.types.Cluster): - Required. A `cluster - resource `__ - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (str): - The parent (project and location) where the cluster will - be created. Specified in the format - ``projects/*/locations/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([project_id, zone, cluster, parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CreateClusterRequest): - request = cluster_service.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster is not None: - request.cluster = cluster - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_cluster(self, - request: cluster_service.UpdateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - update: cluster_service.ClusterUpdate = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the settings of a specific cluster. - - Args: - request (google.container_v1.types.UpdateClusterRequest): - The request object. UpdateClusterRequest updates the - settings of a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update (google.container_v1.types.ClusterUpdate): - Required. A description of the - update. - - This corresponds to the ``update`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, update, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.UpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.UpdateClusterRequest): - request = cluster_service.UpdateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if update is not None: - request.update = update - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_node_pool(self, - request: cluster_service.UpdateNodePoolRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the version and/or image type for the - specified node pool. - - Args: - request (google.container_v1.types.UpdateNodePoolRequest): - The request object. UpdateNodePoolRequests update a node - pool's image and/or version. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.UpdateNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.UpdateNodePoolRequest): - request = cluster_service.UpdateNodePoolRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_node_pool_autoscaling(self, - request: cluster_service.SetNodePoolAutoscalingRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the autoscaling settings for the specified node - pool. - - Args: - request (google.container_v1.types.SetNodePoolAutoscalingRequest): - The request object. SetNodePoolAutoscalingRequest sets - the autoscaler settings of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetNodePoolAutoscalingRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest): - request = cluster_service.SetNodePoolAutoscalingRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_node_pool_autoscaling] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_logging_service(self, - request: cluster_service.SetLoggingServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - logging_service: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the logging service for a specific cluster. - - Args: - request (google.container_v1.types.SetLoggingServiceRequest): - The request object. SetLoggingServiceRequest sets the - logging service of a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. 
- - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logging_service (str): - Required. The logging service the cluster should use to - write logs. Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud - Logging service with a Kubernetes-native resource - model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``logging.googleapis.com`` for - earlier versions. - - This corresponds to the ``logging_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - set logging. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, logging_service, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLoggingServiceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLoggingServiceRequest): - request = cluster_service.SetLoggingServiceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if logging_service is not None: - request.logging_service = logging_service - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_logging_service] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def set_monitoring_service(self, - request: cluster_service.SetMonitoringServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - monitoring_service: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the monitoring service for a specific cluster. - - Args: - request (google.container_v1.types.SetMonitoringServiceRequest): - The request object. SetMonitoringServiceRequest sets the - monitoring service of a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - monitoring_service (str): - Required. The monitoring service the cluster should use - to write metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE - 1.15). - - ``none`` - No metrics will be exported from the - cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will - be used for GKE 1.14+ or ``monitoring.googleapis.com`` - for earlier versions. - - This corresponds to the ``monitoring_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - set monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, monitoring_service, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetMonitoringServiceRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetMonitoringServiceRequest): - request = cluster_service.SetMonitoringServiceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if monitoring_service is not None: - request.monitoring_service = monitoring_service - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_addons_config(self, - request: cluster_service.SetAddonsConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - addons_config: cluster_service.AddonsConfig = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the addons for a specific cluster. - - Args: - request (google.container_v1.types.SetAddonsConfigRequest): - The request object. SetAddonsConfigRequest sets the - addons associated with the cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - addons_config (google.container_v1.types.AddonsConfig): - Required. The desired configurations - for the various addons available to run - in the cluster. - - This corresponds to the ``addons_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - set addons. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, addons_config, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetAddonsConfigRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetAddonsConfigRequest): - request = cluster_service.SetAddonsConfigRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if addons_config is not None: - request.addons_config = addons_config - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_addons_config] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_locations(self, - request: cluster_service.SetLocationsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - locations: Sequence[str] = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the locations for a specific cluster. Deprecated. Use - `projects.locations.clusters.update `__ - instead. - - Args: - request (google.container_v1.types.SetLocationsRequest): - The request object. SetLocationsRequest sets the - locations of the cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - locations (Sequence[str]): - Required. The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. 
Changing - the locations a cluster is in will result in nodes being - either created or removed from the cluster, depending on - whether locations are being added or removed. - - This list must always include the cluster's primary - zone. - - This corresponds to the ``locations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - set locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - warnings.warn("ClusterManagerClient.set_locations is deprecated", - DeprecationWarning) - - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, locations, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLocationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLocationsRequest): - request = cluster_service.SetLocationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if locations is not None: - request.locations = locations - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_locations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_master(self, - request: cluster_service.UpdateMasterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - master_version: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the master for a specific cluster. - - Args: - request (google.container_v1.types.UpdateMasterRequest): - The request object. UpdateMasterRequest updates the - master of the cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. 
- This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - master_version (str): - Required. The Kubernetes version to - change the master to. - Users may specify either explicit - versions offered by Kubernetes Engine or - version aliases, which have the - following behavior: - - "latest": picks the highest valid - Kubernetes version - "1.X": picks the - highest valid patch+gke.N patch in the - 1.X version - "1.X.Y": picks the highest - valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the - default Kubernetes version - - This corresponds to the ``master_version`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, master_version, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.UpdateMasterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.UpdateMasterRequest): - request = cluster_service.UpdateMasterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if master_version is not None: - request.master_version = master_version - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_master] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_master_auth(self, - request: cluster_service.SetMasterAuthRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets master auth materials. Currently supports - changing the admin password or a specific cluster, - either via password generation or explicitly setting the - password. - - Args: - request (google.container_v1.types.SetMasterAuthRequest): - The request object. SetMasterAuthRequest updates the - admin password of a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetMasterAuthRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetMasterAuthRequest): - request = cluster_service.SetMasterAuthRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_master_auth] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_cluster(self, - request: cluster_service.DeleteClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted. - - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Args: - request (google.container_v1.types.DeleteClusterRequest): - The request object. DeleteClusterRequest deletes a - cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster) of the cluster to - delete. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.DeleteClusterRequest): - request = cluster_service.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_operations(self, - request: cluster_service.ListOperationsRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListOperationsResponse: - r"""Lists all operations in a project in a specific zone - or all zones. - - Args: - request (google.container_v1.types.ListOperationsRequest): - The request object. ListOperationsRequest lists - operations. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. 
- This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - to return operations for, or ``-`` for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ListOperationsResponse: - ListOperationsResponse is the result - of ListOperationsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListOperationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListOperationsRequest): - request = cluster_service.ListOperationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_operations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_operation(self, - request: cluster_service.GetOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Gets the specified operation. - - Args: - request (google.container_v1.types.GetOperationRequest): - The request object. GetOperationRequest gets a single - operation. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
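# Usage sketch (assumed names, not part of the generated diff): every mutating
# call in this client returns a cluster_service.Operation, which can be polled
# with get_operation until it reaches DONE.
import time

from google.container_v1 import ClusterManagerClient
from google.container_v1.types import Operation

client = ClusterManagerClient()
op_name = "projects/my-project/locations/us-central1-a/operations/operation-123"
op = client.get_operation(name=op_name)
while op.status != Operation.Status.DONE:  # still PENDING or RUNNING
    time.sleep(5)  # fixed delay purely for illustration
    op = client.get_operation(name=op_name)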
- operation_id (str): - Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced - by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, operation id) of the - operation to get. Specified in the format - ``projects/*/locations/*/operations/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetOperationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetOperationRequest): - request = cluster_service.GetOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_operation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def cancel_operation(self, - request: cluster_service.CancelOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels the specified operation. - - Args: - request (google.container_v1.types.CancelOperationRequest): - The request object. CancelOperationRequest cancels a - single operation. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the operation resides. 
This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (str): - Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced - by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, operation id) of the - operation to cancel. Specified in the format - ``projects/*/locations/*/operations/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CancelOperationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CancelOperationRequest): - request = cluster_service.CancelOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_operation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def get_server_config(self, - request: cluster_service.GetServerConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ServerConfig: - r"""Returns configuration info about the Google - Kubernetes Engine service. - - Args: - request (google.container_v1.types.GetServerConfigRequest): - The request object. Gets the current Kubernetes Engine - service configuration. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - to return operations for. 
This field has been deprecated - and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project and location) of the server config to - get, specified in the format ``projects/*/locations/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ServerConfig: - Kubernetes Engine service - configuration. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetServerConfigRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetServerConfigRequest): - request = cluster_service.GetServerConfigRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_server_config] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_json_web_keys(self, - request: cluster_service.GetJSONWebKeysRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.GetJSONWebKeysResponse: - r"""Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Args: - request (google.container_v1.types.GetJSONWebKeysRequest): - The request object. GetJSONWebKeysRequest gets the - public component of the keys used by the cluster to sign - token requests. This will be the jwks_uri for the - discovery document returned by getOpenIDConfig. See the - OpenID Connect Discovery 1.0 specification for details. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata.
- - Returns: - google.container_v1.types.GetJSONWebKeysResponse: - GetJSONWebKeysResponse is a valid - JSON Web Key Set as specified in RFC - 7517 - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetJSONWebKeysRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetJSONWebKeysRequest): - request = cluster_service.GetJSONWebKeysRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_json_web_keys] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_node_pools(self, - request: cluster_service.ListNodePoolsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListNodePoolsResponse: - r"""Lists the node pools for a cluster. - - Args: - request (google.container_v1.types.ListNodePoolsRequest): - The request object. ListNodePoolsRequest lists the node - pool(s) for a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (str): - The parent (project, location, cluster id) where the - node pools will be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.ListNodePoolsResponse: - ListNodePoolsResponse is the result - of ListNodePoolsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request.
- has_flattened_params = any([project_id, zone, cluster_id, parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListNodePoolsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListNodePoolsRequest): - request = cluster_service.ListNodePoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_node_pools] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_node_pool(self, - request: cluster_service.GetNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.NodePool: - r"""Retrieves the requested node pool. - - Args: - request (google.container_v1.types.GetNodePoolRequest): - The request object. GetNodePoolRequest retrieves a node - pool for a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Deprecated. The name of the node - pool. This field has been deprecated and - replaced by the name field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster, node pool id) of - the node pool to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.NodePool: - NodePool contains the name and - configuration for a cluster's node pool. - Node pools are a set of nodes (i.e. - VM's), with a common configuration and - specification, under the control of the - cluster master. They may have a set of - Kubernetes labels applied to them, which - may be used to reference them during pod - scheduling. They may also be resized up - or down, to accommodate the workload. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetNodePoolRequest): - request = cluster_service.GetNodePoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_node_pool(self, - request: cluster_service.CreateNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool: cluster_service.NodePool = None, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a node pool for a cluster. - - Args: - request (google.container_v1.types.CreateNodePoolRequest): - The request object. CreateNodePoolRequest creates a node - pool for a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. 
- This field has been deprecated and - replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool (google.container_v1.types.NodePool): - Required. The node pool to create. - This corresponds to the ``node_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - parent (str): - The parent (project, location, cluster id) where the - node pool will be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool, parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CreateNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CreateNodePoolRequest): - request = cluster_service.CreateNodePoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool is not None: - request.node_pool = node_pool - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_node_pool(self, - request: cluster_service.DeleteNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes a node pool from a cluster. - - Args: - request (google.container_v1.types.DeleteNodePoolRequest): - The request object. DeleteNodePoolRequest deletes a node - pool for a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. 
- This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Deprecated. The name of the node pool - to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster, node pool id) of - the node pool to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.DeleteNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.DeleteNodePoolRequest): - request = cluster_service.DeleteNodePoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def rollback_node_pool_upgrade(self, - request: cluster_service.RollbackNodePoolUpgradeRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Args: - request (google.container_v1.types.RollbackNodePoolUpgradeRequest): - The request object. RollbackNodePoolUpgradeRequest - rolls back the previously Aborted or Failed NodePool - upgrade. This will be a no-op if the last upgrade - successfully completed. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to rollback. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Deprecated. The name of the node pool - to rollback. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster, node pool id) of - the node pool to rollback upgrade. Specified in the - format - ``projects/*/locations/*/clusters/*/nodePools/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.RollbackNodePoolUpgradeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields.
- if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest): - request = cluster_service.RollbackNodePoolUpgradeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.rollback_node_pool_upgrade] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_node_pool_management(self, - request: cluster_service.SetNodePoolManagementRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the NodeManagement options for a node pool. - - Args: - request (google.container_v1.types.SetNodePoolManagementRequest): - The request object. SetNodePoolManagementRequest sets - the node management properties of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetNodePoolManagementRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetNodePoolManagementRequest): - request = cluster_service.SetNodePoolManagementRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_labels(self, - request: cluster_service.SetLabelsRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets labels on a cluster. - - Args: - request (google.container_v1.types.SetLabelsRequest): - The request object. 
SetLabelsRequest sets the Google - Cloud Platform labels on a Google Container Engine - cluster, which will in turn set them for Google Compute - Engine resources used by that cluster - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLabelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLabelsRequest): - request = cluster_service.SetLabelsRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_labels] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_legacy_abac(self, - request: cluster_service.SetLegacyAbacRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - enabled: bool = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables the ABAC authorization mechanism - on a cluster. - - Args: - request (google.container_v1.types.SetLegacyAbacRequest): - The request object. SetLegacyAbacRequest enables or - disables the ABAC authorization mechanism for a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster - to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - enabled (bool): - Required. Whether ABAC authorization - will be enabled in the cluster. - - This corresponds to the ``enabled`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster id) of the cluster - to set legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, enabled, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLegacyAbacRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLegacyAbacRequest): - request = cluster_service.SetLegacyAbacRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if enabled is not None: - request.enabled = enabled - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def start_ip_rotation(self, - request: cluster_service.StartIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Starts master IP rotation. - - Args: - request (google.container_v1.types.StartIPRotationRequest): - The request object. StartIPRotationRequest creates a new - IP for the cluster and then performs a node upgrade on - each node pool to point to the new IP. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. 
- - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster id) of the cluster - to start IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.StartIPRotationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.StartIPRotationRequest): - request = cluster_service.StartIPRotationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def complete_ip_rotation(self, - request: cluster_service.CompleteIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Completes master IP rotation. - - Args: - request (google.container_v1.types.CompleteIPRotationRequest): - The request object. CompleteIPRotationRequest moves the - cluster master back into single-IP mode. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. 
- - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster id) of the cluster - to complete IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CompleteIPRotationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CompleteIPRotationRequest): - request = cluster_service.CompleteIPRotationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_node_pool_size(self, - request: cluster_service.SetNodePoolSizeRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the size for a specific node pool. - - Args: - request (google.container_v1.types.SetNodePoolSizeRequest): - The request object. SetNodePoolSizeRequest sets the size of - a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata.
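# Usage sketch (assumed values, not part of the generated diff): methods such
# as set_node_pool_size expose no flattened fields, so callers build the
# request object directly.
from google.container_v1 import ClusterManagerClient
from google.container_v1.types import SetNodePoolSizeRequest

client = ClusterManagerClient()
operation = client.set_node_pool_size(
    request=SetNodePoolSizeRequest(
        # Full resource name of the node pool to resize (illustrative).
        name="projects/my-project/locations/us-central1-a/clusters/my-cluster/nodePools/default-pool",
        node_count=3,  # desired node count for the pool
    )
)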
- - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetNodePoolSizeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetNodePoolSizeRequest): - request = cluster_service.SetNodePoolSizeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_network_policy(self, - request: cluster_service.SetNetworkPolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - network_policy: cluster_service.NetworkPolicy = None, - name: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables Network Policy for a cluster. - - Args: - request (google.container_v1.types.SetNetworkPolicyRequest): - The request object. SetNetworkPolicyRequest - enables/disables network policy for a cluster. - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and - replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - network_policy (google.container_v1.types.NetworkPolicy): - Required. Configuration options for - the NetworkPolicy feature. - - This corresponds to the ``network_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - name (str): - The name (project, location, cluster id) of the cluster - to set networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
-
-        Returns:
-            google.container_v1.types.Operation:
-                This operation resource represents
-                operations that may have happened or are
-                happening on the cluster. All fields are
-                output only.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([project_id, zone, cluster_id, network_policy, name])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a cluster_service.SetNetworkPolicyRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, cluster_service.SetNetworkPolicyRequest):
-            request = cluster_service.SetNetworkPolicyRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if project_id is not None:
-                request.project_id = project_id
-            if zone is not None:
-                request.zone = zone
-            if cluster_id is not None:
-                request.cluster_id = cluster_id
-            if network_policy is not None:
-                request.network_policy = network_policy
-            if name is not None:
-                request.name = name
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.set_network_policy]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def set_maintenance_policy(self,
-            request: cluster_service.SetMaintenancePolicyRequest = None,
-            *,
-            project_id: str = None,
-            zone: str = None,
-            cluster_id: str = None,
-            maintenance_policy: cluster_service.MaintenancePolicy = None,
-            name: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> cluster_service.Operation:
-        r"""Sets the maintenance policy for a cluster.
-
-        Args:
-            request (google.container_v1.types.SetMaintenancePolicyRequest):
-                The request object. SetMaintenancePolicyRequest sets the
-                maintenance policy for a cluster.
-            project_id (str):
-                Required. The Google Developers Console `project ID or
-                project
-                number <https://support.google.com/cloud/answer/6158840>`__.
-
-                This corresponds to the ``project_id`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            zone (str):
-                Required. The name of the Google Compute Engine
-                `zone <https://cloud.google.com/compute/docs/zones#available>`__
-                in which the cluster resides.
-
-                This corresponds to the ``zone`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            cluster_id (str):
-                Required. The name of the cluster to
-                update.
-
-                This corresponds to the ``cluster_id`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            maintenance_policy (google.container_v1.types.MaintenancePolicy):
-                Required. The maintenance policy to
-                be set for the cluster. An empty field
-                clears the existing maintenance policy.
-
-                This corresponds to the ``maintenance_policy`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
- name (str): - The name (project, location, cluster id) of the cluster - to set maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy, name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetMaintenancePolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetMaintenancePolicyRequest): - request = cluster_service.SetMaintenancePolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if maintenance_policy is not None: - request.maintenance_policy = maintenance_policy - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_usable_subnetworks(self, - request: cluster_service.ListUsableSubnetworksRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListUsableSubnetworksPager: - r"""Lists subnetworks that are usable for creating - clusters in a project. - - Args: - request (google.container_v1.types.ListUsableSubnetworksRequest): - The request object. ListUsableSubnetworksRequest - requests the list of usable subnetworks available to a - user for creating clusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1.services.cluster_manager.pagers.ListUsableSubnetworksPager: - ListUsableSubnetworksResponse is the - response of - ListUsableSubnetworksRequest. 
- - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListUsableSubnetworksRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListUsableSubnetworksRequest): - request = cluster_service.ListUsableSubnetworksRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListUsableSubnetworksPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-container", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterManagerClient", -) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py deleted file mode 100644 index b8360b94..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.container_v1.types import cluster_service - - -class ListUsableSubnetworksPager: - """A pager for iterating through ``list_usable_subnetworks`` requests. - - This class thinly wraps an initial - :class:`google.container_v1.types.ListUsableSubnetworksResponse` object, and - provides an ``__iter__`` method to iterate through its - ``subnetworks`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListUsableSubnetworks`` requests and continue to iterate - through the ``subnetworks`` field on the - corresponding responses. - - All the usual :class:`google.container_v1.types.ListUsableSubnetworksResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., cluster_service.ListUsableSubnetworksResponse], - request: cluster_service.ListUsableSubnetworksRequest, - response: cluster_service.ListUsableSubnetworksResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.container_v1.types.ListUsableSubnetworksRequest): - The initial request object. - response (google.container_v1.types.ListUsableSubnetworksResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = cluster_service.ListUsableSubnetworksRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[cluster_service.ListUsableSubnetworksResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[cluster_service.UsableSubnetwork]: - for page in self.pages: - yield from page.subnetworks - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListUsableSubnetworksAsyncPager: - """A pager for iterating through ``list_usable_subnetworks`` requests. - - This class thinly wraps an initial - :class:`google.container_v1.types.ListUsableSubnetworksResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``subnetworks`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListUsableSubnetworks`` requests and continue to iterate - through the ``subnetworks`` field on the - corresponding responses. - - All the usual :class:`google.container_v1.types.ListUsableSubnetworksResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[cluster_service.ListUsableSubnetworksResponse]], - request: cluster_service.ListUsableSubnetworksRequest, - response: cluster_service.ListUsableSubnetworksResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.container_v1.types.ListUsableSubnetworksRequest): - The initial request object. - response (google.container_v1.types.ListUsableSubnetworksResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = cluster_service.ListUsableSubnetworksRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[cluster_service.ListUsableSubnetworksResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[cluster_service.UsableSubnetwork]: - async def async_generator(): - async for page in self.pages: - for response in page.subnetworks: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py deleted file mode 100644 index 32ea8716..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ClusterManagerTransport -from .grpc import ClusterManagerGrpcTransport -from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] -_transport_registry['grpc'] = ClusterManagerGrpcTransport -_transport_registry['grpc_asyncio'] = ClusterManagerGrpcAsyncIOTransport - -__all__ = ( - 'ClusterManagerTransport', - 'ClusterManagerGrpcTransport', - 'ClusterManagerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py deleted file mode 100644 index d026d49c..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/base.py +++ /dev/null @@ -1,666 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.container_v1.types import cluster_service -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-container', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class ClusterManagerTransport(abc.ABC): - """Abstract transport class for ClusterManager.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'container.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
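# The `_prep_wrapped_messages` table below repeats one retry policy many
# times. Built standalone, with the values copied from those entries, it
# reads as follows (an illustrative sketch, not part of the generated code):
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries

default_retry = retries.Retry(
    initial=0.1,      # first backoff delay, in seconds
    maximum=60.0,     # individual delays never exceed 60 s
    multiplier=1.3,   # each delay is 1.3x the previous one
    predicate=retries.if_exception_type(
        core_exceptions.DeadlineExceeded,
        core_exceptions.ServiceUnavailable,
    ),
    deadline=20.0,    # give up once 20 s have elapsed overall
)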
-        if credentials and credentials_file:
-            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
-
-        if credentials_file is not None:
-            credentials, _ = google.auth.load_credentials_from_file(
-                credentials_file,
-                **scopes_kwargs,
-                quota_project_id=quota_project_id
-            )
-
-        elif credentials is None:
-            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
-
-        # If the credentials are service account credentials, then always try to use self signed JWT.
-        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
-            credentials = credentials.with_always_use_jwt_access(True)
-
-        # Save the credentials.
-        self._credentials = credentials
-
-    # TODO(busunkim): This method is in the base transport
-    # to avoid duplicating code across the transport classes. These functions
-    # should be deleted once the minimum required version of google-auth is increased.
-
-    # TODO: Remove this function once google-auth >= 1.25.0 is required
-    @classmethod
-    def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
-        """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
-
-        scopes_kwargs = {}
-
-        if _GOOGLE_AUTH_VERSION and (
-            packaging.version.parse(_GOOGLE_AUTH_VERSION)
-            >= packaging.version.parse("1.25.0")
-        ):
-            scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
-        else:
-            scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
-
-        return scopes_kwargs
-
-    def _prep_wrapped_messages(self, client_info):
-        # Precompute the wrapped methods.
-        self._wrapped_methods = {
-            self.list_clusters: gapic_v1.method.wrap_method(
-                self.list_clusters,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.get_cluster: gapic_v1.method.wrap_method(
-                self.get_cluster,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.create_cluster: gapic_v1.method.wrap_method(
-                self.create_cluster,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.update_cluster: gapic_v1.method.wrap_method(
-                self.update_cluster,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.update_node_pool: gapic_v1.method.wrap_method(
-                self.update_node_pool,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_node_pool_autoscaling: gapic_v1.method.wrap_method(
-                self.set_node_pool_autoscaling,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_logging_service: gapic_v1.method.wrap_method(
-                self.set_logging_service,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_monitoring_service: gapic_v1.method.wrap_method(
-                self.set_monitoring_service,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_addons_config: gapic_v1.method.wrap_method(
-                self.set_addons_config,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_locations: gapic_v1.method.wrap_method(
-                self.set_locations,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.update_master: gapic_v1.method.wrap_method(
-                self.update_master,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_master_auth: gapic_v1.method.wrap_method(
-                self.set_master_auth,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.delete_cluster: gapic_v1.method.wrap_method(
-                self.delete_cluster,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.list_operations: gapic_v1.method.wrap_method(
-                self.list_operations,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.get_operation: gapic_v1.method.wrap_method(
-                self.get_operation,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.cancel_operation: gapic_v1.method.wrap_method(
-                self.cancel_operation,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.get_server_config: gapic_v1.method.wrap_method(
-                self.get_server_config,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.get_json_web_keys: gapic_v1.method.wrap_method(
-                self.get_json_web_keys,
-                default_timeout=None,
-                client_info=client_info,
-            ),
-            self.list_node_pools: gapic_v1.method.wrap_method(
-                self.list_node_pools,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.get_node_pool: gapic_v1.method.wrap_method(
-                self.get_node_pool,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.create_node_pool: gapic_v1.method.wrap_method(
-                self.create_node_pool,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.delete_node_pool: gapic_v1.method.wrap_method(
-                self.delete_node_pool,
-                default_retry=retries.Retry(
-                    initial=0.1,
-                    maximum=60.0,
-                    multiplier=1.3,
-                    predicate=retries.if_exception_type(
-                        core_exceptions.DeadlineExceeded,
-                        core_exceptions.ServiceUnavailable,
-                    ),
-                    deadline=20.0,
-                ),
-                default_timeout=20.0,
-                client_info=client_info,
-            ),
-            self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method(
-                self.rollback_node_pool_upgrade,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_node_pool_management: gapic_v1.method.wrap_method(
-                self.set_node_pool_management,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_labels: gapic_v1.method.wrap_method(
-                self.set_labels,
-                default_timeout=45.0,
-                client_info=client_info,
-            ),
-            self.set_legacy_abac: gapic_v1.method.wrap_method(
-
self.set_legacy_abac, - default_timeout=45.0, - client_info=client_info, - ), - self.start_ip_rotation: gapic_v1.method.wrap_method( - self.start_ip_rotation, - default_timeout=45.0, - client_info=client_info, - ), - self.complete_ip_rotation: gapic_v1.method.wrap_method( - self.complete_ip_rotation, - default_timeout=45.0, - client_info=client_info, - ), - self.set_node_pool_size: gapic_v1.method.wrap_method( - self.set_node_pool_size, - default_timeout=45.0, - client_info=client_info, - ), - self.set_network_policy: gapic_v1.method.wrap_method( - self.set_network_policy, - default_timeout=45.0, - client_info=client_info, - ), - self.set_maintenance_policy: gapic_v1.method.wrap_method( - self.set_maintenance_policy, - default_timeout=45.0, - client_info=client_info, - ), - self.list_usable_subnetworks: gapic_v1.method.wrap_method( - self.list_usable_subnetworks, - default_timeout=None, - client_info=client_info, - ), - } - - @property - def list_clusters(self) -> Callable[ - [cluster_service.ListClustersRequest], - Union[ - cluster_service.ListClustersResponse, - Awaitable[cluster_service.ListClustersResponse] - ]]: - raise NotImplementedError() - - @property - def get_cluster(self) -> Callable[ - [cluster_service.GetClusterRequest], - Union[ - cluster_service.Cluster, - Awaitable[cluster_service.Cluster] - ]]: - raise NotImplementedError() - - @property - def create_cluster(self) -> Callable[ - [cluster_service.CreateClusterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def update_cluster(self) -> Callable[ - [cluster_service.UpdateClusterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def update_node_pool(self) -> Callable[ - [cluster_service.UpdateNodePoolRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_node_pool_autoscaling(self) -> Callable[ - [cluster_service.SetNodePoolAutoscalingRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_logging_service(self) -> Callable[ - [cluster_service.SetLoggingServiceRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_monitoring_service(self) -> Callable[ - [cluster_service.SetMonitoringServiceRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_addons_config(self) -> Callable[ - [cluster_service.SetAddonsConfigRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_locations(self) -> Callable[ - [cluster_service.SetLocationsRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def update_master(self) -> Callable[ - [cluster_service.UpdateMasterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_master_auth(self) -> Callable[ - [cluster_service.SetMasterAuthRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_cluster(self) -> 
Callable[ - [cluster_service.DeleteClusterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def list_operations(self) -> Callable[ - [cluster_service.ListOperationsRequest], - Union[ - cluster_service.ListOperationsResponse, - Awaitable[cluster_service.ListOperationsResponse] - ]]: - raise NotImplementedError() - - @property - def get_operation(self) -> Callable[ - [cluster_service.GetOperationRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_operation(self) -> Callable[ - [cluster_service.CancelOperationRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def get_server_config(self) -> Callable[ - [cluster_service.GetServerConfigRequest], - Union[ - cluster_service.ServerConfig, - Awaitable[cluster_service.ServerConfig] - ]]: - raise NotImplementedError() - - @property - def get_json_web_keys(self) -> Callable[ - [cluster_service.GetJSONWebKeysRequest], - Union[ - cluster_service.GetJSONWebKeysResponse, - Awaitable[cluster_service.GetJSONWebKeysResponse] - ]]: - raise NotImplementedError() - - @property - def list_node_pools(self) -> Callable[ - [cluster_service.ListNodePoolsRequest], - Union[ - cluster_service.ListNodePoolsResponse, - Awaitable[cluster_service.ListNodePoolsResponse] - ]]: - raise NotImplementedError() - - @property - def get_node_pool(self) -> Callable[ - [cluster_service.GetNodePoolRequest], - Union[ - cluster_service.NodePool, - Awaitable[cluster_service.NodePool] - ]]: - raise NotImplementedError() - - @property - def create_node_pool(self) -> Callable[ - [cluster_service.CreateNodePoolRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_node_pool(self) -> Callable[ - [cluster_service.DeleteNodePoolRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def rollback_node_pool_upgrade(self) -> Callable[ - [cluster_service.RollbackNodePoolUpgradeRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_node_pool_management(self) -> Callable[ - [cluster_service.SetNodePoolManagementRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_labels(self) -> Callable[ - [cluster_service.SetLabelsRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_legacy_abac(self) -> Callable[ - [cluster_service.SetLegacyAbacRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def start_ip_rotation(self) -> Callable[ - [cluster_service.StartIPRotationRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def complete_ip_rotation(self) -> Callable[ - [cluster_service.CompleteIPRotationRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_node_pool_size(self) -> Callable[ - [cluster_service.SetNodePoolSizeRequest], - Union[ - 
cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_network_policy(self) -> Callable[ - [cluster_service.SetNetworkPolicyRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_maintenance_policy(self) -> Callable[ - [cluster_service.SetMaintenancePolicyRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def list_usable_subnetworks(self) -> Callable[ - [cluster_service.ListUsableSubnetworksRequest], - Union[ - cluster_service.ListUsableSubnetworksResponse, - Awaitable[cluster_service.ListUsableSubnetworksResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ClusterManagerTransport', -) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py deleted file mode 100644 index f690132c..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc.py +++ /dev/null @@ -1,1097 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.container_v1.types import cluster_service -from google.protobuf import empty_pb2 # type: ignore -from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO - - -class ClusterManagerGrpcTransport(ClusterManagerTransport): - """gRPC backend transport for ClusterManager. - - Google Kubernetes Engine Cluster Manager v1 - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'container.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. 
-
-        Args:
-            host (Optional[str]):
-                The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for the grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure a mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-                creation failed for any reason.
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-        self._grpc_channel = None
-        self._ssl_channel_credentials = ssl_channel_credentials
-        self._stubs: Dict[str, Callable] = {}
-
-        if api_mtls_endpoint:
-            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
-        if client_cert_source:
-            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
-
-        if channel:
-            # Ignore credentials if a channel was passed.
-            credentials = False
-            # If a channel was explicitly provided, set it.
-            self._grpc_channel = channel
-            self._ssl_channel_credentials = None
-
-        else:
-            if api_mtls_endpoint:
-                host = api_mtls_endpoint
-
-                # Create SSL credentials with client_cert_source or application
-                # default SSL credentials.
-
-                if client_cert_source:
-                    cert, key = client_cert_source()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-                else:
-                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
-
-            else:
-                if client_cert_source_for_mtls and not ssl_channel_credentials:
-                    cert, key = client_cert_source_for_mtls()
-                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
-                        certificate_chain=cert, private_key=key
-                    )
-
-        # The base transport sets the host, credentials and scopes
-        super().__init__(
-            host=host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            scopes=scopes,
-            quota_project_id=quota_project_id,
-            client_info=client_info,
-            always_use_jwt_access=always_use_jwt_access,
-        )
-
-        if not self._grpc_channel:
-            self._grpc_channel = type(self).create_channel(
-                self._host,
-                credentials=self._credentials,
-                credentials_file=credentials_file,
-                scopes=self._scopes,
-                ssl_credentials=self._ssl_channel_credentials,
-                quota_project_id=quota_project_id,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-
-        # Wrap messages. This must be done after self._grpc_channel exists
-        self._prep_wrapped_messages(client_info)
-
-    @classmethod
-    def create_channel(cls,
-                       host: str = 'container.googleapis.com',
-                       credentials: ga_credentials.Credentials = None,
-                       credentials_file: str = None,
-                       scopes: Optional[Sequence[str]] = None,
-                       quota_project_id: Optional[str] = None,
-                       **kwargs) -> grpc.Channel:
-        """Create and return a gRPC channel object.
-
-        Args:
-            host (Optional[str]): The host for the channel to use.
-            credentials (Optional[~.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify this application to the service. If
-                none are specified, the client will attempt to ascertain
-                the credentials from the environment.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is mutually exclusive with credentials.
-            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
-                service. These are only used when credentials are not specified and
-                are passed to :func:`google.auth.default`.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            kwargs (Optional[dict]): Keyword arguments, which are passed to the
-                channel creation.
-
-        Returns:
-            grpc.Channel: A gRPC channel object.
-
-        Raises:
-            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-                and ``credentials_file`` are passed.
-        """
-
-        return grpc_helpers.create_channel(
-            host,
-            credentials=credentials,
-            credentials_file=credentials_file,
-            quota_project_id=quota_project_id,
-            default_scopes=cls.AUTH_SCOPES,
-            scopes=scopes,
-            default_host=cls.DEFAULT_HOST,
-            **kwargs
-        )
-
-    @property
-    def grpc_channel(self) -> grpc.Channel:
-        """Return the channel designed to connect to this service.
-        """
-        return self._grpc_channel
-
-    @property
-    def list_clusters(self) -> Callable[
-            [cluster_service.ListClustersRequest],
-            cluster_service.ListClustersResponse]:
-        r"""Return a callable for the list clusters method over gRPC.
-
-        Lists all clusters owned by a project in either the
-        specified zone or all zones.
-
-        Returns:
-            Callable[[~.ListClustersRequest],
-                    ~.ListClustersResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
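Per the constructor above, a ready-made ``channel`` short-circuits all credential handling. A hedged sketch of building one with the classmethod just shown and handing it back to the transport:

from google.container_v1.services.cluster_manager.transports import (
    ClusterManagerGrpcTransport,
)

channel = ClusterManagerGrpcTransport.create_channel(
    'container.googleapis.com',
    scopes=['https://www.googleapis.com/auth/cloud-platform'],
)
transport = ClusterManagerGrpcTransport(channel=channel)  # credentials ignored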
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListClusters', - request_serializer=cluster_service.ListClustersRequest.serialize, - response_deserializer=cluster_service.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def get_cluster(self) -> Callable[ - [cluster_service.GetClusterRequest], - cluster_service.Cluster]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the details of a specific cluster. - - Returns: - Callable[[~.GetClusterRequest], - ~.Cluster]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetCluster', - request_serializer=cluster_service.GetClusterRequest.serialize, - response_deserializer=cluster_service.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def create_cluster(self) -> Callable[ - [cluster_service.CreateClusterRequest], - cluster_service.Operation]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Returns: - Callable[[~.CreateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CreateCluster', - request_serializer=cluster_service.CreateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [cluster_service.UpdateClusterRequest], - cluster_service.Operation]: - r"""Return a callable for the update cluster method over gRPC. - - Updates the settings of a specific cluster. - - Returns: - Callable[[~.UpdateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/UpdateCluster', - request_serializer=cluster_service.UpdateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_cluster'] - - @property - def update_node_pool(self) -> Callable[ - [cluster_service.UpdateNodePoolRequest], - cluster_service.Operation]: - r"""Return a callable for the update node pool method over gRPC. - - Updates the version and/or image type for the - specified node pool. - - Returns: - Callable[[~.UpdateNodePoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_node_pool' not in self._stubs: - self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/UpdateNodePool', - request_serializer=cluster_service.UpdateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_node_pool'] - - @property - def set_node_pool_autoscaling(self) -> Callable[ - [cluster_service.SetNodePoolAutoscalingRequest], - cluster_service.Operation]: - r"""Return a callable for the set node pool autoscaling method over gRPC. - - Sets the autoscaling settings for the specified node - pool. - - Returns: - Callable[[~.SetNodePoolAutoscalingRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_autoscaling' not in self._stubs: - self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNodePoolAutoscaling', - request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_autoscaling'] - - @property - def set_logging_service(self) -> Callable[ - [cluster_service.SetLoggingServiceRequest], - cluster_service.Operation]: - r"""Return a callable for the set logging service method over gRPC. - - Sets the logging service for a specific cluster. - - Returns: - Callable[[~.SetLoggingServiceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_logging_service' not in self._stubs: - self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLoggingService', - request_serializer=cluster_service.SetLoggingServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_logging_service'] - - @property - def set_monitoring_service(self) -> Callable[ - [cluster_service.SetMonitoringServiceRequest], - cluster_service.Operation]: - r"""Return a callable for the set monitoring service method over gRPC. - - Sets the monitoring service for a specific cluster. 
-
-        Returns:
-            Callable[[~.SetMonitoringServiceRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'set_monitoring_service' not in self._stubs:
-            self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary(
-                '/google.container.v1.ClusterManager/SetMonitoringService',
-                request_serializer=cluster_service.SetMonitoringServiceRequest.serialize,
-                response_deserializer=cluster_service.Operation.deserialize,
-            )
-        return self._stubs['set_monitoring_service']
-
-    @property
-    def set_addons_config(self) -> Callable[
-            [cluster_service.SetAddonsConfigRequest],
-            cluster_service.Operation]:
-        r"""Return a callable for the set addons config method over gRPC.
-
-        Sets the addons for a specific cluster.
-
-        Returns:
-            Callable[[~.SetAddonsConfigRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'set_addons_config' not in self._stubs:
-            self._stubs['set_addons_config'] = self.grpc_channel.unary_unary(
-                '/google.container.v1.ClusterManager/SetAddonsConfig',
-                request_serializer=cluster_service.SetAddonsConfigRequest.serialize,
-                response_deserializer=cluster_service.Operation.deserialize,
-            )
-        return self._stubs['set_addons_config']
-
-    @property
-    def set_locations(self) -> Callable[
-            [cluster_service.SetLocationsRequest],
-            cluster_service.Operation]:
-        r"""Return a callable for the set locations method over gRPC.
-
-        Sets the locations for a specific cluster. Deprecated. Use
-        `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update>`__
-        instead.
-
-        Returns:
-            Callable[[~.SetLocationsRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'set_locations' not in self._stubs:
-            self._stubs['set_locations'] = self.grpc_channel.unary_unary(
-                '/google.container.v1.ClusterManager/SetLocations',
-                request_serializer=cluster_service.SetLocationsRequest.serialize,
-                response_deserializer=cluster_service.Operation.deserialize,
-            )
-        return self._stubs['set_locations']
-
-    @property
-    def update_master(self) -> Callable[
-            [cluster_service.UpdateMasterRequest],
-            cluster_service.Operation]:
-        r"""Return a callable for the update master method over gRPC.
-
-        Updates the master for a specific cluster.
-
-        Returns:
-            Callable[[~.UpdateMasterRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-
-        if 'update_master' not in self._stubs:
-            self._stubs['update_master'] = self.grpc_channel.unary_unary(
-                '/google.container.v1.ClusterManager/UpdateMaster',
-                request_serializer=cluster_service.UpdateMasterRequest.serialize,
-                response_deserializer=cluster_service.Operation.deserialize,
-            )
-        return self._stubs['update_master']
-
-    @property
-    def set_master_auth(self) -> Callable[
-            [cluster_service.SetMasterAuthRequest],
-            cluster_service.Operation]:
-        r"""Return a callable for the set master auth method over gRPC.
-
-        Sets master auth materials. Currently supports
-        changing the admin password of a specific cluster,
-        either via password generation or explicitly setting the
-        password.
-
-        Returns:
-            Callable[[~.SetMasterAuthRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'set_master_auth' not in self._stubs:
-            self._stubs['set_master_auth'] = self.grpc_channel.unary_unary(
-                '/google.container.v1.ClusterManager/SetMasterAuth',
-                request_serializer=cluster_service.SetMasterAuthRequest.serialize,
-                response_deserializer=cluster_service.Operation.deserialize,
-            )
-        return self._stubs['set_master_auth']
-
-    @property
-    def delete_cluster(self) -> Callable[
-            [cluster_service.DeleteClusterRequest],
-            cluster_service.Operation]:
-        r"""Return a callable for the delete cluster method over gRPC.
-
-        Deletes the cluster, including the Kubernetes
-        endpoint and all worker nodes.
-
-        Firewalls and routes that were configured during cluster
-        creation are also deleted.
-
-        Other Google Compute Engine resources that might be in
-        use by the cluster, such as load balancer resources, are
-        not deleted if they weren't present when the cluster was
-        initially created.
-
-        Returns:
-            Callable[[~.DeleteClusterRequest],
-                    ~.Operation]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
-        if 'delete_cluster' not in self._stubs:
-            self._stubs['delete_cluster'] = self.grpc_channel.unary_unary(
-                '/google.container.v1.ClusterManager/DeleteCluster',
-                request_serializer=cluster_service.DeleteClusterRequest.serialize,
-                response_deserializer=cluster_service.Operation.deserialize,
-            )
-        return self._stubs['delete_cluster']
-
-    @property
-    def list_operations(self) -> Callable[
-            [cluster_service.ListOperationsRequest],
-            cluster_service.ListOperationsResponse]:
-        r"""Return a callable for the list operations method over gRPC.
-
-        Lists all operations in a project in a specific zone
-        or all zones.
-
-        Returns:
-            Callable[[~.ListOperationsRequest],
-                    ~.ListOperationsResponse]:
-                A function that, when called, will call the underlying RPC
-                on the server.
-        """
-        # Generate a "stub function" on-the-fly which will actually make
-        # the request.
-        # gRPC handles serialization and deserialization, so we just need
-        # to pass in the functions for each.
- if 'list_operations' not in self._stubs: - self._stubs['list_operations'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListOperations', - request_serializer=cluster_service.ListOperationsRequest.serialize, - response_deserializer=cluster_service.ListOperationsResponse.deserialize, - ) - return self._stubs['list_operations'] - - @property - def get_operation(self) -> Callable[ - [cluster_service.GetOperationRequest], - cluster_service.Operation]: - r"""Return a callable for the get operation method over gRPC. - - Gets the specified operation. - - Returns: - Callable[[~.GetOperationRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_operation' not in self._stubs: - self._stubs['get_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetOperation', - request_serializer=cluster_service.GetOperationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['get_operation'] - - @property - def cancel_operation(self) -> Callable[ - [cluster_service.CancelOperationRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel operation method over gRPC. - - Cancels the specified operation. - - Returns: - Callable[[~.CancelOperationRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_operation' not in self._stubs: - self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CancelOperation', - request_serializer=cluster_service.CancelOperationRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_operation'] - - @property - def get_server_config(self) -> Callable[ - [cluster_service.GetServerConfigRequest], - cluster_service.ServerConfig]: - r"""Return a callable for the get server config method over gRPC. - - Returns configuration info about the Google - Kubernetes Engine service. - - Returns: - Callable[[~.GetServerConfigRequest], - ~.ServerConfig]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_server_config' not in self._stubs: - self._stubs['get_server_config'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetServerConfig', - request_serializer=cluster_service.GetServerConfigRequest.serialize, - response_deserializer=cluster_service.ServerConfig.deserialize, - ) - return self._stubs['get_server_config'] - - @property - def get_json_web_keys(self) -> Callable[ - [cluster_service.GetJSONWebKeysRequest], - cluster_service.GetJSONWebKeysResponse]: - r"""Return a callable for the get json web keys method over gRPC. - - Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. 
- - Returns: - Callable[[~.GetJSONWebKeysRequest], - ~.GetJSONWebKeysResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_json_web_keys' not in self._stubs: - self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetJSONWebKeys', - request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, - response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, - ) - return self._stubs['get_json_web_keys'] - - @property - def list_node_pools(self) -> Callable[ - [cluster_service.ListNodePoolsRequest], - cluster_service.ListNodePoolsResponse]: - r"""Return a callable for the list node pools method over gRPC. - - Lists the node pools for a cluster. - - Returns: - Callable[[~.ListNodePoolsRequest], - ~.ListNodePoolsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_node_pools' not in self._stubs: - self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListNodePools', - request_serializer=cluster_service.ListNodePoolsRequest.serialize, - response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, - ) - return self._stubs['list_node_pools'] - - @property - def get_node_pool(self) -> Callable[ - [cluster_service.GetNodePoolRequest], - cluster_service.NodePool]: - r"""Return a callable for the get node pool method over gRPC. - - Retrieves the requested node pool. - - Returns: - Callable[[~.GetNodePoolRequest], - ~.NodePool]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_node_pool' not in self._stubs: - self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetNodePool', - request_serializer=cluster_service.GetNodePoolRequest.serialize, - response_deserializer=cluster_service.NodePool.deserialize, - ) - return self._stubs['get_node_pool'] - - @property - def create_node_pool(self) -> Callable[ - [cluster_service.CreateNodePoolRequest], - cluster_service.Operation]: - r"""Return a callable for the create node pool method over gRPC. - - Creates a node pool for a cluster. - - Returns: - Callable[[~.CreateNodePoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_node_pool' not in self._stubs: - self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CreateNodePool', - request_serializer=cluster_service.CreateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_node_pool'] - - @property - def delete_node_pool(self) -> Callable[ - [cluster_service.DeleteNodePoolRequest], - cluster_service.Operation]: - r"""Return a callable for the delete node pool method over gRPC. - - Deletes a node pool from a cluster. - - Returns: - Callable[[~.DeleteNodePoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_node_pool' not in self._stubs: - self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/DeleteNodePool', - request_serializer=cluster_service.DeleteNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_node_pool'] - - @property - def rollback_node_pool_upgrade(self) -> Callable[ - [cluster_service.RollbackNodePoolUpgradeRequest], - cluster_service.Operation]: - r"""Return a callable for the rollback node pool upgrade method over gRPC. - - Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Returns: - Callable[[~.RollbackNodePoolUpgradeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'rollback_node_pool_upgrade' not in self._stubs: - self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/RollbackNodePoolUpgrade', - request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['rollback_node_pool_upgrade'] - - @property - def set_node_pool_management(self) -> Callable[ - [cluster_service.SetNodePoolManagementRequest], - cluster_service.Operation]: - r"""Return a callable for the set node pool management method over gRPC. - - Sets the NodeManagement options for a node pool. - - Returns: - Callable[[~.SetNodePoolManagementRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_management' not in self._stubs: - self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNodePoolManagement', - request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_management'] - - @property - def set_labels(self) -> Callable[ - [cluster_service.SetLabelsRequest], - cluster_service.Operation]: - r"""Return a callable for the set labels method over gRPC. 
- - Sets labels on a cluster. - - Returns: - Callable[[~.SetLabelsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_labels' not in self._stubs: - self._stubs['set_labels'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLabels', - request_serializer=cluster_service.SetLabelsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_labels'] - - @property - def set_legacy_abac(self) -> Callable[ - [cluster_service.SetLegacyAbacRequest], - cluster_service.Operation]: - r"""Return a callable for the set legacy abac method over gRPC. - - Enables or disables the ABAC authorization mechanism - on a cluster. - - Returns: - Callable[[~.SetLegacyAbacRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_legacy_abac' not in self._stubs: - self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLegacyAbac', - request_serializer=cluster_service.SetLegacyAbacRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_legacy_abac'] - - @property - def start_ip_rotation(self) -> Callable[ - [cluster_service.StartIPRotationRequest], - cluster_service.Operation]: - r"""Return a callable for the start ip rotation method over gRPC. - - Starts master IP rotation. - - Returns: - Callable[[~.StartIPRotationRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'start_ip_rotation' not in self._stubs: - self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/StartIPRotation', - request_serializer=cluster_service.StartIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['start_ip_rotation'] - - @property - def complete_ip_rotation(self) -> Callable[ - [cluster_service.CompleteIPRotationRequest], - cluster_service.Operation]: - r"""Return a callable for the complete ip rotation method over gRPC. - - Completes master IP rotation. - - Returns: - Callable[[~.CompleteIPRotationRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'complete_ip_rotation' not in self._stubs: - self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CompleteIPRotation', - request_serializer=cluster_service.CompleteIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['complete_ip_rotation'] - - @property - def set_node_pool_size(self) -> Callable[ - [cluster_service.SetNodePoolSizeRequest], - cluster_service.Operation]: - r"""Return a callable for the set node pool size method over gRPC. - - Sets the size for a specific node pool. - - Returns: - Callable[[~.SetNodePoolSizeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_size' not in self._stubs: - self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNodePoolSize', - request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_size'] - - @property - def set_network_policy(self) -> Callable[ - [cluster_service.SetNetworkPolicyRequest], - cluster_service.Operation]: - r"""Return a callable for the set network policy method over gRPC. - - Enables or disables Network Policy for a cluster. - - Returns: - Callable[[~.SetNetworkPolicyRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_network_policy' not in self._stubs: - self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNetworkPolicy', - request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_network_policy'] - - @property - def set_maintenance_policy(self) -> Callable[ - [cluster_service.SetMaintenancePolicyRequest], - cluster_service.Operation]: - r"""Return a callable for the set maintenance policy method over gRPC. - - Sets the maintenance policy for a cluster. - - Returns: - Callable[[~.SetMaintenancePolicyRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_maintenance_policy' not in self._stubs: - self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetMaintenancePolicy', - request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_maintenance_policy'] - - @property - def list_usable_subnetworks(self) -> Callable[ - [cluster_service.ListUsableSubnetworksRequest], - cluster_service.ListUsableSubnetworksResponse]: - r"""Return a callable for the list usable subnetworks method over gRPC. 
- - Lists subnetworks that are usable for creating - clusters in a project. - - Returns: - Callable[[~.ListUsableSubnetworksRequest], - ~.ListUsableSubnetworksResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_usable_subnetworks' not in self._stubs: - self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListUsableSubnetworks', - request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, - response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, - ) - return self._stubs['list_usable_subnetworks'] - - -__all__ = ( - 'ClusterManagerGrpcTransport', -) diff --git a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py b/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py deleted file mode 100644 index bff48325..00000000 --- a/owl-bot-staging/v1/google/container_v1/services/cluster_manager/transports/grpc_asyncio.py +++ /dev/null @@ -1,1101 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.container_v1.types import cluster_service -from google.protobuf import empty_pb2 # type: ignore -from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO -from .grpc import ClusterManagerGrpcTransport - - -class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport): - """gRPC AsyncIO backend transport for ClusterManager. - - Google Kubernetes Engine Cluster Manager v1 - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'container.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. 
- credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'container.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel.
It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def list_clusters(self) -> Callable[ - [cluster_service.ListClustersRequest], - Awaitable[cluster_service.ListClustersResponse]]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all clusters owned by a project in either the - specified zone or all zones. - - Returns: - Callable[[~.ListClustersRequest], - Awaitable[~.ListClustersResponse]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListClusters', - request_serializer=cluster_service.ListClustersRequest.serialize, - response_deserializer=cluster_service.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def get_cluster(self) -> Callable[ - [cluster_service.GetClusterRequest], - Awaitable[cluster_service.Cluster]]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the details of a specific cluster. - - Returns: - Callable[[~.GetClusterRequest], - Awaitable[~.Cluster]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetCluster', - request_serializer=cluster_service.GetClusterRequest.serialize, - response_deserializer=cluster_service.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def create_cluster(self) -> Callable[ - [cluster_service.CreateClusterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network <https://cloud.google.com/compute/docs/networks-and-firewalls#networks>`__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Returns: - Callable[[~.CreateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CreateCluster', - request_serializer=cluster_service.CreateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [cluster_service.UpdateClusterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the update cluster method over gRPC. - - Updates the settings of a specific cluster. - - Returns: - Callable[[~.UpdateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/UpdateCluster', - request_serializer=cluster_service.UpdateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_cluster'] - - @property - def update_node_pool(self) -> Callable[ - [cluster_service.UpdateNodePoolRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the update node pool method over gRPC. - - Updates the version and/or image type for the - specified node pool. - - Returns: - Callable[[~.UpdateNodePoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_node_pool' not in self._stubs: - self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/UpdateNodePool', - request_serializer=cluster_service.UpdateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_node_pool'] - - @property - def set_node_pool_autoscaling(self) -> Callable[ - [cluster_service.SetNodePoolAutoscalingRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set node pool autoscaling method over gRPC. - - Sets the autoscaling settings for the specified node - pool. - - Returns: - Callable[[~.SetNodePoolAutoscalingRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_autoscaling' not in self._stubs: - self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNodePoolAutoscaling', - request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_autoscaling'] - - @property - def set_logging_service(self) -> Callable[ - [cluster_service.SetLoggingServiceRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set logging service method over gRPC. - - Sets the logging service for a specific cluster. - - Returns: - Callable[[~.SetLoggingServiceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'set_logging_service' not in self._stubs: - self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLoggingService', - request_serializer=cluster_service.SetLoggingServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_logging_service'] - - @property - def set_monitoring_service(self) -> Callable[ - [cluster_service.SetMonitoringServiceRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set monitoring service method over gRPC. - - Sets the monitoring service for a specific cluster. - - Returns: - Callable[[~.SetMonitoringServiceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_monitoring_service' not in self._stubs: - self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetMonitoringService', - request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_monitoring_service'] - - @property - def set_addons_config(self) -> Callable[ - [cluster_service.SetAddonsConfigRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set addons config method over gRPC. - - Sets the addons for a specific cluster. - - Returns: - Callable[[~.SetAddonsConfigRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_addons_config' not in self._stubs: - self._stubs['set_addons_config'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetAddonsConfig', - request_serializer=cluster_service.SetAddonsConfigRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_addons_config'] - - @property - def set_locations(self) -> Callable[ - [cluster_service.SetLocationsRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set locations method over gRPC. - - Sets the locations for a specific cluster. Deprecated. Use - `projects.locations.clusters.update <https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.locations.clusters/update>`__ - instead. - - Returns: - Callable[[~.SetLocationsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_locations' not in self._stubs: - self._stubs['set_locations'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLocations', - request_serializer=cluster_service.SetLocationsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_locations'] - - @property - def update_master(self) -> Callable[ - [cluster_service.UpdateMasterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the update master method over gRPC.
- - Updates the master for a specific cluster. - - Returns: - Callable[[~.UpdateMasterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_master' not in self._stubs: - self._stubs['update_master'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/UpdateMaster', - request_serializer=cluster_service.UpdateMasterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_master'] - - @property - def set_master_auth(self) -> Callable[ - [cluster_service.SetMasterAuthRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set master auth method over gRPC. - - Sets master auth materials. Currently supports - changing the admin password of a specific cluster, - either via password generation or explicitly setting the - password. - - Returns: - Callable[[~.SetMasterAuthRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_master_auth' not in self._stubs: - self._stubs['set_master_auth'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetMasterAuth', - request_serializer=cluster_service.SetMasterAuthRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_master_auth'] - - @property - def delete_cluster(self) -> Callable[ - [cluster_service.DeleteClusterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted. - - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Returns: - Callable[[~.DeleteClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/DeleteCluster', - request_serializer=cluster_service.DeleteClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_cluster'] - - @property - def list_operations(self) -> Callable[ - [cluster_service.ListOperationsRequest], - Awaitable[cluster_service.ListOperationsResponse]]: - r"""Return a callable for the list operations method over gRPC. - - Lists all operations in a project in a specific zone - or all zones. - - Returns: - Callable[[~.ListOperationsRequest], - Awaitable[~.ListOperationsResponse]]: - A function that, when called, will call the underlying RPC - on the server.
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_operations' not in self._stubs: - self._stubs['list_operations'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListOperations', - request_serializer=cluster_service.ListOperationsRequest.serialize, - response_deserializer=cluster_service.ListOperationsResponse.deserialize, - ) - return self._stubs['list_operations'] - - @property - def get_operation(self) -> Callable[ - [cluster_service.GetOperationRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the get operation method over gRPC. - - Gets the specified operation. - - Returns: - Callable[[~.GetOperationRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_operation' not in self._stubs: - self._stubs['get_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetOperation', - request_serializer=cluster_service.GetOperationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['get_operation'] - - @property - def cancel_operation(self) -> Callable[ - [cluster_service.CancelOperationRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel operation method over gRPC. - - Cancels the specified operation. - - Returns: - Callable[[~.CancelOperationRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_operation' not in self._stubs: - self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CancelOperation', - request_serializer=cluster_service.CancelOperationRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_operation'] - - @property - def get_server_config(self) -> Callable[ - [cluster_service.GetServerConfigRequest], - Awaitable[cluster_service.ServerConfig]]: - r"""Return a callable for the get server config method over gRPC. - - Returns configuration info about the Google - Kubernetes Engine service. - - Returns: - Callable[[~.GetServerConfigRequest], - Awaitable[~.ServerConfig]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_server_config' not in self._stubs: - self._stubs['get_server_config'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetServerConfig', - request_serializer=cluster_service.GetServerConfigRequest.serialize, - response_deserializer=cluster_service.ServerConfig.deserialize, - ) - return self._stubs['get_server_config'] - - @property - def get_json_web_keys(self) -> Callable[ - [cluster_service.GetJSONWebKeysRequest], - Awaitable[cluster_service.GetJSONWebKeysResponse]]: - r"""Return a callable for the get json web keys method over gRPC. - - Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Returns: - Callable[[~.GetJSONWebKeysRequest], - Awaitable[~.GetJSONWebKeysResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_json_web_keys' not in self._stubs: - self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetJSONWebKeys', - request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, - response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, - ) - return self._stubs['get_json_web_keys'] - - @property - def list_node_pools(self) -> Callable[ - [cluster_service.ListNodePoolsRequest], - Awaitable[cluster_service.ListNodePoolsResponse]]: - r"""Return a callable for the list node pools method over gRPC. - - Lists the node pools for a cluster. - - Returns: - Callable[[~.ListNodePoolsRequest], - Awaitable[~.ListNodePoolsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_node_pools' not in self._stubs: - self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListNodePools', - request_serializer=cluster_service.ListNodePoolsRequest.serialize, - response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, - ) - return self._stubs['list_node_pools'] - - @property - def get_node_pool(self) -> Callable[ - [cluster_service.GetNodePoolRequest], - Awaitable[cluster_service.NodePool]]: - r"""Return a callable for the get node pool method over gRPC. - - Retrieves the requested node pool. - - Returns: - Callable[[~.GetNodePoolRequest], - Awaitable[~.NodePool]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_node_pool' not in self._stubs: - self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/GetNodePool', - request_serializer=cluster_service.GetNodePoolRequest.serialize, - response_deserializer=cluster_service.NodePool.deserialize, - ) - return self._stubs['get_node_pool'] - - @property - def create_node_pool(self) -> Callable[ - [cluster_service.CreateNodePoolRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the create node pool method over gRPC. - - Creates a node pool for a cluster. - - Returns: - Callable[[~.CreateNodePoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_node_pool' not in self._stubs: - self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CreateNodePool', - request_serializer=cluster_service.CreateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_node_pool'] - - @property - def delete_node_pool(self) -> Callable[ - [cluster_service.DeleteNodePoolRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the delete node pool method over gRPC. - - Deletes a node pool from a cluster. - - Returns: - Callable[[~.DeleteNodePoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_node_pool' not in self._stubs: - self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/DeleteNodePool', - request_serializer=cluster_service.DeleteNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_node_pool'] - - @property - def rollback_node_pool_upgrade(self) -> Callable[ - [cluster_service.RollbackNodePoolUpgradeRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the rollback node pool upgrade method over gRPC. - - Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Returns: - Callable[[~.RollbackNodePoolUpgradeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'rollback_node_pool_upgrade' not in self._stubs: - self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/RollbackNodePoolUpgrade', - request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['rollback_node_pool_upgrade'] - - @property - def set_node_pool_management(self) -> Callable[ - [cluster_service.SetNodePoolManagementRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set node pool management method over gRPC. - - Sets the NodeManagement options for a node pool. - - Returns: - Callable[[~.SetNodePoolManagementRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_management' not in self._stubs: - self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNodePoolManagement', - request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_management'] - - @property - def set_labels(self) -> Callable[ - [cluster_service.SetLabelsRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set labels method over gRPC. - - Sets labels on a cluster. - - Returns: - Callable[[~.SetLabelsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_labels' not in self._stubs: - self._stubs['set_labels'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLabels', - request_serializer=cluster_service.SetLabelsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_labels'] - - @property - def set_legacy_abac(self) -> Callable[ - [cluster_service.SetLegacyAbacRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set legacy abac method over gRPC. - - Enables or disables the ABAC authorization mechanism - on a cluster. - - Returns: - Callable[[~.SetLegacyAbacRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_legacy_abac' not in self._stubs: - self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetLegacyAbac', - request_serializer=cluster_service.SetLegacyAbacRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_legacy_abac'] - - @property - def start_ip_rotation(self) -> Callable[ - [cluster_service.StartIPRotationRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the start ip rotation method over gRPC. - - Starts master IP rotation. 
- - Returns: - Callable[[~.StartIPRotationRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'start_ip_rotation' not in self._stubs: - self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/StartIPRotation', - request_serializer=cluster_service.StartIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['start_ip_rotation'] - - @property - def complete_ip_rotation(self) -> Callable[ - [cluster_service.CompleteIPRotationRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the complete ip rotation method over gRPC. - - Completes master IP rotation. - - Returns: - Callable[[~.CompleteIPRotationRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_ip_rotation' not in self._stubs: - self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/CompleteIPRotation', - request_serializer=cluster_service.CompleteIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['complete_ip_rotation'] - - @property - def set_node_pool_size(self) -> Callable[ - [cluster_service.SetNodePoolSizeRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set node pool size method over gRPC. - - Sets the size for a specific node pool. - - Returns: - Callable[[~.SetNodePoolSizeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_size' not in self._stubs: - self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNodePoolSize', - request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_size'] - - @property - def set_network_policy(self) -> Callable[ - [cluster_service.SetNetworkPolicyRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set network policy method over gRPC. - - Enables or disables Network Policy for a cluster. - - Returns: - Callable[[~.SetNetworkPolicyRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'set_network_policy' not in self._stubs: - self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetNetworkPolicy', - request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_network_policy'] - - @property - def set_maintenance_policy(self) -> Callable[ - [cluster_service.SetMaintenancePolicyRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set maintenance policy method over gRPC. - - Sets the maintenance policy for a cluster. - - Returns: - Callable[[~.SetMaintenancePolicyRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_maintenance_policy' not in self._stubs: - self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/SetMaintenancePolicy', - request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_maintenance_policy'] - - @property - def list_usable_subnetworks(self) -> Callable[ - [cluster_service.ListUsableSubnetworksRequest], - Awaitable[cluster_service.ListUsableSubnetworksResponse]]: - r"""Return a callable for the list usable subnetworks method over gRPC. - - Lists subnetworks that are usable for creating - clusters in a project. - - Returns: - Callable[[~.ListUsableSubnetworksRequest], - Awaitable[~.ListUsableSubnetworksResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_usable_subnetworks' not in self._stubs: - self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( - '/google.container.v1.ClusterManager/ListUsableSubnetworks', - request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, - response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, - ) - return self._stubs['list_usable_subnetworks'] - - -__all__ = ( - 'ClusterManagerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1/google/container_v1/types/__init__.py b/owl-bot-staging/v1/google/container_v1/types/__init__.py deleted file mode 100644 index fe1f93fb..00000000 --- a/owl-bot-staging/v1/google/container_v1/types/__init__.py +++ /dev/null @@ -1,210 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .cluster_service import ( - AcceleratorConfig, - AddonsConfig, - AuthenticatorGroupsConfig, - AutoprovisioningNodePoolDefaults, - AutoUpgradeOptions, - BinaryAuthorization, - CancelOperationRequest, - ClientCertificateConfig, - CloudRunConfig, - Cluster, - ClusterAutoscaling, - ClusterUpdate, - CompleteIPRotationRequest, - ConfigConnectorConfig, - CreateClusterRequest, - CreateNodePoolRequest, - DailyMaintenanceWindow, - DatabaseEncryption, - DefaultSnatStatus, - DeleteClusterRequest, - DeleteNodePoolRequest, - DnsCacheConfig, - GetClusterRequest, - GetJSONWebKeysRequest, - GetJSONWebKeysResponse, - GetNodePoolRequest, - GetOpenIDConfigRequest, - GetOpenIDConfigResponse, - GetOperationRequest, - GetServerConfigRequest, - HorizontalPodAutoscaling, - HttpLoadBalancing, - IntraNodeVisibilityConfig, - IPAllocationPolicy, - Jwk, - KubernetesDashboard, - LegacyAbac, - ListClustersRequest, - ListClustersResponse, - ListNodePoolsRequest, - ListNodePoolsResponse, - ListOperationsRequest, - ListOperationsResponse, - ListUsableSubnetworksRequest, - ListUsableSubnetworksResponse, - MaintenancePolicy, - MaintenanceWindow, - MasterAuth, - MasterAuthorizedNetworksConfig, - MaxPodsConstraint, - NetworkConfig, - NetworkPolicy, - NetworkPolicyConfig, - NodeConfig, - NodeManagement, - NodePool, - NodePoolAutoscaling, - NodeTaint, - Operation, - OperationProgress, - PrivateClusterConfig, - PrivateClusterMasterGlobalAccessConfig, - RecurringTimeWindow, - ReleaseChannel, - ReservationAffinity, - ResourceLimit, - ResourceUsageExportConfig, - RollbackNodePoolUpgradeRequest, - SandboxConfig, - ServerConfig, - SetAddonsConfigRequest, - SetLabelsRequest, - SetLegacyAbacRequest, - SetLocationsRequest, - SetLoggingServiceRequest, - SetMaintenancePolicyRequest, - SetMasterAuthRequest, - SetMonitoringServiceRequest, - SetNetworkPolicyRequest, - SetNodePoolAutoscalingRequest, - SetNodePoolManagementRequest, - SetNodePoolSizeRequest, - ShieldedInstanceConfig, - ShieldedNodes, - StartIPRotationRequest, - StatusCondition, - TimeWindow, - UpdateClusterRequest, - UpdateMasterRequest, - UpdateNodePoolRequest, - UsableSubnetwork, - UsableSubnetworkSecondaryRange, - VerticalPodAutoscaling, - WorkloadIdentityConfig, - WorkloadMetadataConfig, -) - -__all__ = ( - 'AcceleratorConfig', - 'AddonsConfig', - 'AuthenticatorGroupsConfig', - 'AutoprovisioningNodePoolDefaults', - 'AutoUpgradeOptions', - 'BinaryAuthorization', - 'CancelOperationRequest', - 'ClientCertificateConfig', - 'CloudRunConfig', - 'Cluster', - 'ClusterAutoscaling', - 'ClusterUpdate', - 'CompleteIPRotationRequest', - 'ConfigConnectorConfig', - 'CreateClusterRequest', - 'CreateNodePoolRequest', - 'DailyMaintenanceWindow', - 'DatabaseEncryption', - 'DefaultSnatStatus', - 'DeleteClusterRequest', - 'DeleteNodePoolRequest', - 'DnsCacheConfig', - 'GetClusterRequest', - 'GetJSONWebKeysRequest', - 'GetJSONWebKeysResponse', - 'GetNodePoolRequest', - 'GetOpenIDConfigRequest', - 'GetOpenIDConfigResponse', - 'GetOperationRequest', - 'GetServerConfigRequest', - 'HorizontalPodAutoscaling', - 'HttpLoadBalancing', - 'IntraNodeVisibilityConfig', - 'IPAllocationPolicy', - 'Jwk', - 'KubernetesDashboard', - 'LegacyAbac', - 'ListClustersRequest', - 'ListClustersResponse', - 'ListNodePoolsRequest', - 'ListNodePoolsResponse', - 'ListOperationsRequest', - 'ListOperationsResponse', - 'ListUsableSubnetworksRequest', - 'ListUsableSubnetworksResponse', - 'MaintenancePolicy', - 'MaintenanceWindow', - 'MasterAuth', - 'MasterAuthorizedNetworksConfig', - 'MaxPodsConstraint', - 
'NetworkConfig', - 'NetworkPolicy', - 'NetworkPolicyConfig', - 'NodeConfig', - 'NodeManagement', - 'NodePool', - 'NodePoolAutoscaling', - 'NodeTaint', - 'Operation', - 'OperationProgress', - 'PrivateClusterConfig', - 'PrivateClusterMasterGlobalAccessConfig', - 'RecurringTimeWindow', - 'ReleaseChannel', - 'ReservationAffinity', - 'ResourceLimit', - 'ResourceUsageExportConfig', - 'RollbackNodePoolUpgradeRequest', - 'SandboxConfig', - 'ServerConfig', - 'SetAddonsConfigRequest', - 'SetLabelsRequest', - 'SetLegacyAbacRequest', - 'SetLocationsRequest', - 'SetLoggingServiceRequest', - 'SetMaintenancePolicyRequest', - 'SetMasterAuthRequest', - 'SetMonitoringServiceRequest', - 'SetNetworkPolicyRequest', - 'SetNodePoolAutoscalingRequest', - 'SetNodePoolManagementRequest', - 'SetNodePoolSizeRequest', - 'ShieldedInstanceConfig', - 'ShieldedNodes', - 'StartIPRotationRequest', - 'StatusCondition', - 'TimeWindow', - 'UpdateClusterRequest', - 'UpdateMasterRequest', - 'UpdateNodePoolRequest', - 'UsableSubnetwork', - 'UsableSubnetworkSecondaryRange', - 'VerticalPodAutoscaling', - 'WorkloadIdentityConfig', - 'WorkloadMetadataConfig', -) diff --git a/owl-bot-staging/v1/google/container_v1/types/cluster_service.py b/owl-bot-staging/v1/google/container_v1/types/cluster_service.py deleted file mode 100644 index 25e7f0b1..00000000 --- a/owl-bot-staging/v1/google/container_v1/types/cluster_service.py +++ /dev/null @@ -1,5120 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
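The message classes that follow all use the proto-plus idiom: a module-level __protobuf__ = proto.module(...) manifest registers every message name in the package, each message subclasses proto.Message, and each field carries an explicit wire number. A minimal self-contained sketch of the idiom, with an invented DiskSpec message and package name used purely for illustration:

    import proto  # proto-plus, the runtime used by cluster_service.py

    # Illustrative package and manifest, mirroring the __protobuf__
    # pattern used below; 'DiskSpec' is not part of this API.
    __protobuf__ = proto.module(
        package='example.illustration.v1',
        manifest={'DiskSpec'},
    )

    class DiskSpec(proto.Message):
        disk_type = proto.Field(proto.STRING, number=1)
        disk_size_gb = proto.Field(proto.INT32, number=2)

    spec = DiskSpec(disk_type='pd-ssd', disk_size_gb=100)
    # serialize()/deserialize() are the hooks the gRPC transports pass
    # as request_serializer and response_deserializer.
    payload = DiskSpec.serialize(spec)
    assert DiskSpec.deserialize(payload).disk_size_gb == 100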
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.container.v1', - manifest={ - 'NodeConfig', - 'ShieldedInstanceConfig', - 'SandboxConfig', - 'ReservationAffinity', - 'NodeTaint', - 'MasterAuth', - 'ClientCertificateConfig', - 'AddonsConfig', - 'HttpLoadBalancing', - 'HorizontalPodAutoscaling', - 'KubernetesDashboard', - 'NetworkPolicyConfig', - 'DnsCacheConfig', - 'PrivateClusterMasterGlobalAccessConfig', - 'PrivateClusterConfig', - 'AuthenticatorGroupsConfig', - 'CloudRunConfig', - 'ConfigConnectorConfig', - 'MasterAuthorizedNetworksConfig', - 'LegacyAbac', - 'NetworkPolicy', - 'BinaryAuthorization', - 'IPAllocationPolicy', - 'Cluster', - 'ClusterUpdate', - 'Operation', - 'OperationProgress', - 'CreateClusterRequest', - 'GetClusterRequest', - 'UpdateClusterRequest', - 'UpdateNodePoolRequest', - 'SetNodePoolAutoscalingRequest', - 'SetLoggingServiceRequest', - 'SetMonitoringServiceRequest', - 'SetAddonsConfigRequest', - 'SetLocationsRequest', - 'UpdateMasterRequest', - 'SetMasterAuthRequest', - 'DeleteClusterRequest', - 'ListClustersRequest', - 'ListClustersResponse', - 'GetOperationRequest', - 'ListOperationsRequest', - 'CancelOperationRequest', - 'ListOperationsResponse', - 'GetServerConfigRequest', - 'ServerConfig', - 'CreateNodePoolRequest', - 'DeleteNodePoolRequest', - 'ListNodePoolsRequest', - 'GetNodePoolRequest', - 'NodePool', - 'NodeManagement', - 'AutoUpgradeOptions', - 'MaintenancePolicy', - 'MaintenanceWindow', - 'TimeWindow', - 'RecurringTimeWindow', - 'DailyMaintenanceWindow', - 'SetNodePoolManagementRequest', - 'SetNodePoolSizeRequest', - 'RollbackNodePoolUpgradeRequest', - 'ListNodePoolsResponse', - 'ClusterAutoscaling', - 'AutoprovisioningNodePoolDefaults', - 'ResourceLimit', - 'NodePoolAutoscaling', - 'SetLabelsRequest', - 'SetLegacyAbacRequest', - 'StartIPRotationRequest', - 'CompleteIPRotationRequest', - 'AcceleratorConfig', - 'WorkloadMetadataConfig', - 'SetNetworkPolicyRequest', - 'SetMaintenancePolicyRequest', - 'StatusCondition', - 'NetworkConfig', - 'GetOpenIDConfigRequest', - 'GetOpenIDConfigResponse', - 'GetJSONWebKeysRequest', - 'Jwk', - 'GetJSONWebKeysResponse', - 'ReleaseChannel', - 'IntraNodeVisibilityConfig', - 'MaxPodsConstraint', - 'WorkloadIdentityConfig', - 'DatabaseEncryption', - 'ListUsableSubnetworksRequest', - 'ListUsableSubnetworksResponse', - 'UsableSubnetworkSecondaryRange', - 'UsableSubnetwork', - 'ResourceUsageExportConfig', - 'VerticalPodAutoscaling', - 'DefaultSnatStatus', - 'ShieldedNodes', - }, -) - - -class NodeConfig(proto.Message): - r"""Parameters that describe the nodes in a cluster. - Attributes: - machine_type (str): - The name of a Google Compute Engine `machine - type `__ - - If unspecified, the default machine type is ``e2-medium``. - disk_size_gb (int): - Size of the disk attached to each node, - specified in GB. The smallest allowed disk size - is 10GB. - If unspecified, the default disk size is 100GB. - oauth_scopes (Sequence[str]): - The set of Google API scopes to be made available on all of - the node VMs under the "default" service account. - - The following scopes are recommended, but not required, and - by default are not included: - - - ``https://www.googleapis.com/auth/compute`` is required - for mounting persistent storage on your nodes. - - ``https://www.googleapis.com/auth/devstorage.read_only`` - is required for communicating with **gcr.io** (the - `Google Container - Registry `__). 
- - If unspecified, no scopes are added, unless Cloud Logging or - Cloud Monitoring are enabled, in which case their required - scopes will be added. - service_account (str): - The Google Cloud Platform Service Account to - be used by the node VMs. Specify the email - address of the Service Account; otherwise, if no - Service Account is specified, the "default" - service account is used. - metadata (Sequence[google.container_v1.types.NodeConfig.MetadataEntry]): - The metadata key/value pairs assigned to instances in the - cluster. - - Keys must conform to the regexp ``[a-zA-Z0-9-_]+`` and be - less than 128 bytes in length. These are reflected as part - of a URL in the metadata server. Additionally, to avoid - ambiguity, keys must not conflict with any other metadata - keys for the project or be one of the reserved keys: - - - "cluster-location" - - "cluster-name" - - "cluster-uid" - - "configure-sh" - - "containerd-configure-sh" - - "enable-os-login" - - "gci-ensure-gke-docker" - - "gci-metrics-enabled" - - "gci-update-strategy" - - "instance-template" - - "kube-env" - - "startup-script" - - "user-data" - - "disable-address-manager" - - "windows-startup-script-ps1" - - "common-psm1" - - "k8s-node-setup-psm1" - - "install-ssh-psm1" - - "user-profile-psm1" - - The following keys are reserved for Windows nodes: - - - "serial-port-logging-enable" - - Values are free-form strings, and only have meaning as - interpreted by the image running in the instance. The only - restriction placed on them is that each value's size must be - less than or equal to 32 KB. - - The total size of all keys and values must be less than 512 - KB. - image_type (str): - The image type to use for this node. Note - that for a given image type, the latest version - of it will be used. - labels (Sequence[google.container_v1.types.NodeConfig.LabelsEntry]): - The map of Kubernetes labels (key/value - pairs) to be applied to each node. These will be - added in addition to any default label(s) that - Kubernetes may apply to the node. - In case of conflict in label keys, the applied - set may differ depending on the Kubernetes - version -- it's best to assume the behavior is - undefined and conflicts should be avoided. - For more information, including usage and the - valid values, see: - https://kubernetes.io/docs/concepts/overview/working- - with-objects/labels/ - local_ssd_count (int): - The number of local SSD disks to be attached - to the node. - The limit for this value is dependent upon the - maximum number of disks available on a machine - per zone. See: - https://cloud.google.com/compute/docs/disks/local- - ssd for more information. - tags (Sequence[str]): - The list of instance tags applied to all - nodes. Tags are used to identify valid sources - or targets for network firewalls and are - specified by the client during cluster or node - pool creation. Each tag within the list must - comply with RFC1035. - preemptible (bool): - Whether the nodes are created as preemptible - VM instances. See: - https://cloud.google.com/compute/docs/instances/preemptible - for more information about preemptible VM - instances. - accelerators (Sequence[google.container_v1.types.AcceleratorConfig]): - A list of hardware accelerators to be - attached to each node. See - https://cloud.google.com/compute/docs/gpus for - more information about support for GPUs. - disk_type (str): - Type of the disk attached to each node (e.g.
- 'pd-standard', 'pd-ssd' or 'pd-balanced') - - If unspecified, the default disk type is 'pd- - standard' - min_cpu_platform (str): - Minimum CPU platform to be used by this instance. The - instance may be scheduled on the specified or newer CPU - platform. Applicable values are the friendly names of CPU - platforms, such as ``minCpuPlatform: "Intel Haswell"`` or - ``minCpuPlatform: "Intel Sandy Bridge"``. For more - information, read `how to specify min CPU - platform `__ - workload_metadata_config (google.container_v1.types.WorkloadMetadataConfig): - The workload metadata configuration for this - node. - taints (Sequence[google.container_v1.types.NodeTaint]): - List of kubernetes taints to be applied to - each node. - For more information, including usage and the - valid values, see: - https://kubernetes.io/docs/concepts/configuration/taint- - and-toleration/ - sandbox_config (google.container_v1.types.SandboxConfig): - Sandbox configuration for this node. - node_group (str): - Setting this field will assign instances of this pool to run - on the specified node group. This is useful for running - workloads on `sole tenant - nodes `__. - reservation_affinity (google.container_v1.types.ReservationAffinity): - The optional reservation affinity. Setting this field will - apply the specified `Zonal Compute - Reservation `__ - to this node pool. - shielded_instance_config (google.container_v1.types.ShieldedInstanceConfig): - Shielded Instance options. - boot_disk_kms_key (str): - The Customer Managed Encryption Key used to encrypt the boot - disk attached to each node in the node pool. This should be - of the form - projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. - For more information about protecting resources with Cloud - KMS Keys please see: - https://cloud.google.com/compute/docs/disks/customer-managed-encryption - """ - - machine_type = proto.Field( - proto.STRING, - number=1, - ) - disk_size_gb = proto.Field( - proto.INT32, - number=2, - ) - oauth_scopes = proto.RepeatedField( - proto.STRING, - number=3, - ) - service_account = proto.Field( - proto.STRING, - number=9, - ) - metadata = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - image_type = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - local_ssd_count = proto.Field( - proto.INT32, - number=7, - ) - tags = proto.RepeatedField( - proto.STRING, - number=8, - ) - preemptible = proto.Field( - proto.BOOL, - number=10, - ) - accelerators = proto.RepeatedField( - proto.MESSAGE, - number=11, - message='AcceleratorConfig', - ) - disk_type = proto.Field( - proto.STRING, - number=12, - ) - min_cpu_platform = proto.Field( - proto.STRING, - number=13, - ) - workload_metadata_config = proto.Field( - proto.MESSAGE, - number=14, - message='WorkloadMetadataConfig', - ) - taints = proto.RepeatedField( - proto.MESSAGE, - number=15, - message='NodeTaint', - ) - sandbox_config = proto.Field( - proto.MESSAGE, - number=17, - message='SandboxConfig', - ) - node_group = proto.Field( - proto.STRING, - number=18, - ) - reservation_affinity = proto.Field( - proto.MESSAGE, - number=19, - message='ReservationAffinity', - ) - shielded_instance_config = proto.Field( - proto.MESSAGE, - number=20, - message='ShieldedInstanceConfig', - ) - boot_disk_kms_key = proto.Field( - proto.STRING, - number=23, - ) - - -class ShieldedInstanceConfig(proto.Message): - r"""A set of Shielded Instance options. 
- Attributes: - enable_secure_boot (bool): - Defines whether the instance has Secure Boot - enabled. - Secure Boot helps ensure that the system only - runs authentic software by verifying the digital - signature of all boot components, and halting - the boot process if signature verification - fails. - enable_integrity_monitoring (bool): - Defines whether the instance has integrity - monitoring enabled. - Enables monitoring and attestation of the boot - integrity of the instance. The attestation is - performed against the integrity policy baseline. - This baseline is initially derived from the - implicitly trusted boot image when the instance - is created. - """ - - enable_secure_boot = proto.Field( - proto.BOOL, - number=1, - ) - enable_integrity_monitoring = proto.Field( - proto.BOOL, - number=2, - ) - - -class SandboxConfig(proto.Message): - r"""SandboxConfig contains configurations of the sandbox to use - for the node. - - Attributes: - type_ (google.container_v1.types.SandboxConfig.Type): - Type of the sandbox to use for the node. - """ - class Type(proto.Enum): - r"""Possible types of sandboxes.""" - UNSPECIFIED = 0 - GVISOR = 1 - - type_ = proto.Field( - proto.ENUM, - number=2, - enum=Type, - ) - - -class ReservationAffinity(proto.Message): - r"""`ReservationAffinity `__ - is the configuration of desired reservation which instances could - take capacity from. - - Attributes: - consume_reservation_type (google.container_v1.types.ReservationAffinity.Type): - Corresponds to the type of reservation - consumption. - key (str): - Corresponds to the label key of a reservation resource. To - target a SPECIFIC_RESERVATION by name, specify - "googleapis.com/reservation-name" as the key and specify the - name of your reservation as its value. - values (Sequence[str]): - Corresponds to the label value(s) of - reservation resource(s). - """ - class Type(proto.Enum): - r"""Indicates whether to consume capacity from a reservation or - not. - """ - UNSPECIFIED = 0 - NO_RESERVATION = 1 - ANY_RESERVATION = 2 - SPECIFIC_RESERVATION = 3 - - consume_reservation_type = proto.Field( - proto.ENUM, - number=1, - enum=Type, - ) - key = proto.Field( - proto.STRING, - number=2, - ) - values = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class NodeTaint(proto.Message): - r"""Kubernetes taint is comprised of three fields: key, value, and - effect. Effect can only be one of three types: NoSchedule, - PreferNoSchedule or NoExecute. - - See - `here `__ - for more information, including usage and the valid values. - - Attributes: - key (str): - Key for taint. - value (str): - Value for taint. - effect (google.container_v1.types.NodeTaint.Effect): - Effect for taint. - """ - class Effect(proto.Enum): - r"""Possible values for Effect in taint.""" - EFFECT_UNSPECIFIED = 0 - NO_SCHEDULE = 1 - PREFER_NO_SCHEDULE = 2 - NO_EXECUTE = 3 - - key = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - effect = proto.Field( - proto.ENUM, - number=3, - enum=Effect, - ) - - -class MasterAuth(proto.Message): - r"""The authentication information for accessing the master - endpoint. Authentication can be done using HTTP basic auth or - using client certificates. - - Attributes: - username (str): - The username to use for HTTP basic - authentication to the master endpoint. For - clusters v1.6.0 and later, basic authentication - can be disabled by leaving username unspecified - (or setting it to the empty string). 
- Warning: basic authentication is deprecated, and - will be removed in GKE control plane versions - 1.19 and newer. For a list of recommended - authentication methods, see: - https://cloud.google.com/kubernetes- - engine/docs/how-to/api-server-authentication - password (str): - The password to use for HTTP basic - authentication to the master endpoint. Because - the master endpoint is open to the Internet, you - should create a strong password. If a password - is provided for cluster creation, username must - be non-empty. - - Warning: basic authentication is deprecated, and - will be removed in GKE control plane versions - 1.19 and newer. For a list of recommended - authentication methods, see: - https://cloud.google.com/kubernetes- - engine/docs/how-to/api-server-authentication - client_certificate_config (google.container_v1.types.ClientCertificateConfig): - Configuration for client certificate - authentication on the cluster. For clusters - before v1.12, if no configuration is specified, - a client certificate is issued. - cluster_ca_certificate (str): - [Output only] Base64-encoded public certificate that is the - root of trust for the cluster. - client_certificate (str): - [Output only] Base64-encoded public certificate used by - clients to authenticate to the cluster endpoint. - client_key (str): - [Output only] Base64-encoded private key used by clients to - authenticate to the cluster endpoint. - """ - - username = proto.Field( - proto.STRING, - number=1, - ) - password = proto.Field( - proto.STRING, - number=2, - ) - client_certificate_config = proto.Field( - proto.MESSAGE, - number=3, - message='ClientCertificateConfig', - ) - cluster_ca_certificate = proto.Field( - proto.STRING, - number=100, - ) - client_certificate = proto.Field( - proto.STRING, - number=101, - ) - client_key = proto.Field( - proto.STRING, - number=102, - ) - - -class ClientCertificateConfig(proto.Message): - r"""Configuration for client certificates on the cluster. - Attributes: - issue_client_certificate (bool): - Issue a client certificate. - """ - - issue_client_certificate = proto.Field( - proto.BOOL, - number=1, - ) - - -class AddonsConfig(proto.Message): - r"""Configuration for the addons that can be automatically spun - up in the cluster, enabling additional functionality. - - Attributes: - http_load_balancing (google.container_v1.types.HttpLoadBalancing): - Configuration for the HTTP (L7) load - balancing controller addon, which makes it easy - to set up HTTP load balancers for services in a - cluster. - horizontal_pod_autoscaling (google.container_v1.types.HorizontalPodAutoscaling): - Configuration for the horizontal pod - autoscaling feature, which increases or - decreases the number of replica pods a - replication controller has based on the resource - usage of the existing pods. - kubernetes_dashboard (google.container_v1.types.KubernetesDashboard): - Configuration for the Kubernetes Dashboard. - This addon is deprecated, and will be disabled - in 1.15. It is recommended to use the Cloud - Console to manage and monitor your Kubernetes - clusters, workloads and applications. For more - information, see: - https://cloud.google.com/kubernetes- - engine/docs/concepts/dashboards - network_policy_config (google.container_v1.types.NetworkPolicyConfig): - Configuration for NetworkPolicy. This only - tracks whether the addon is enabled or not on - the Master, it does not track whether network - policy is enabled for the nodes. 
- cloud_run_config (google.container_v1.types.CloudRunConfig): - Configuration for the Cloud Run addon, which - allows the user to use a managed Knative - service. - dns_cache_config (google.container_v1.types.DnsCacheConfig): - Configuration for NodeLocalDNS, a DNS cache - running on cluster nodes - config_connector_config (google.container_v1.types.ConfigConnectorConfig): - Configuration for the ConfigConnector add-on, - a Kubernetes extension to manage hosted GCP - services through the Kubernetes API - """ - - http_load_balancing = proto.Field( - proto.MESSAGE, - number=1, - message='HttpLoadBalancing', - ) - horizontal_pod_autoscaling = proto.Field( - proto.MESSAGE, - number=2, - message='HorizontalPodAutoscaling', - ) - kubernetes_dashboard = proto.Field( - proto.MESSAGE, - number=3, - message='KubernetesDashboard', - ) - network_policy_config = proto.Field( - proto.MESSAGE, - number=4, - message='NetworkPolicyConfig', - ) - cloud_run_config = proto.Field( - proto.MESSAGE, - number=7, - message='CloudRunConfig', - ) - dns_cache_config = proto.Field( - proto.MESSAGE, - number=8, - message='DnsCacheConfig', - ) - config_connector_config = proto.Field( - proto.MESSAGE, - number=10, - message='ConfigConnectorConfig', - ) - - -class HttpLoadBalancing(proto.Message): - r"""Configuration options for the HTTP (L7) load balancing - controller addon, which makes it easy to set up HTTP load - balancers for services in a cluster. - - Attributes: - disabled (bool): - Whether the HTTP Load Balancing controller is - enabled in the cluster. When enabled, it runs a - small pod in the cluster that manages the load - balancers. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class HorizontalPodAutoscaling(proto.Message): - r"""Configuration options for the horizontal pod autoscaling - feature, which increases or decreases the number of replica pods - a replication controller has based on the resource usage of the - existing pods. - - Attributes: - disabled (bool): - Whether the Horizontal Pod Autoscaling - feature is enabled in the cluster. When enabled, - it ensures that metrics are collected into - Stackdriver Monitoring. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class KubernetesDashboard(proto.Message): - r"""Configuration for the Kubernetes Dashboard. - Attributes: - disabled (bool): - Whether the Kubernetes Dashboard is enabled - for this cluster. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class NetworkPolicyConfig(proto.Message): - r"""Configuration for NetworkPolicy. This only tracks whether the - addon is enabled or not on the Master, it does not track whether - network policy is enabled for the nodes. - - Attributes: - disabled (bool): - Whether NetworkPolicy is enabled for this - cluster. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class DnsCacheConfig(proto.Message): - r"""Configuration for NodeLocal DNSCache - Attributes: - enabled (bool): - Whether NodeLocal DNSCache is enabled for - this cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class PrivateClusterMasterGlobalAccessConfig(proto.Message): - r"""Configuration for controlling master global access settings. - Attributes: - enabled (bool): - Whether the master is accessible globally or - not. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class PrivateClusterConfig(proto.Message): - r"""Configuration options for private clusters.
- Attributes: - enable_private_nodes (bool): - Whether nodes have internal IP addresses - only. If enabled, all nodes are given only RFC - 1918 private addresses and communicate with the - master via private networking. - enable_private_endpoint (bool): - Whether the master's internal IP address is - used as the cluster endpoint. - master_ipv4_cidr_block (str): - The IP range in CIDR notation to use for the - hosted master network. This range will be used - for assigning internal IP addresses to the - master or set of masters, as well as the ILB - VIP. This range must not overlap with any other - ranges in use within the cluster's network. - private_endpoint (str): - Output only. The internal IP address of this - cluster's master endpoint. - public_endpoint (str): - Output only. The external IP address of this - cluster's master endpoint. - peering_name (str): - Output only. The peering name in the customer - VPC used by this cluster. - master_global_access_config (google.container_v1.types.PrivateClusterMasterGlobalAccessConfig): - Controls master global access settings. - """ - - enable_private_nodes = proto.Field( - proto.BOOL, - number=1, - ) - enable_private_endpoint = proto.Field( - proto.BOOL, - number=2, - ) - master_ipv4_cidr_block = proto.Field( - proto.STRING, - number=3, - ) - private_endpoint = proto.Field( - proto.STRING, - number=4, - ) - public_endpoint = proto.Field( - proto.STRING, - number=5, - ) - peering_name = proto.Field( - proto.STRING, - number=7, - ) - master_global_access_config = proto.Field( - proto.MESSAGE, - number=8, - message='PrivateClusterMasterGlobalAccessConfig', - ) - - -class AuthenticatorGroupsConfig(proto.Message): - r"""Configuration for returning group information from - authenticators. - - Attributes: - enabled (bool): - Whether this cluster should return group - membership lookups during authentication using a - group of security groups. - security_group (str): - The name of the security group-of-groups to - be used. Only relevant if enabled = true. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - security_group = proto.Field( - proto.STRING, - number=2, - ) - - -class CloudRunConfig(proto.Message): - r"""Configuration options for the Cloud Run feature. - Attributes: - disabled (bool): - Whether Cloud Run addon is enabled for this - cluster. - load_balancer_type (google.container_v1.types.CloudRunConfig.LoadBalancerType): - Which load balancer type is installed for - Cloud Run. - """ - class LoadBalancerType(proto.Enum): - r"""Load balancer type of ingress service of Cloud Run.""" - LOAD_BALANCER_TYPE_UNSPECIFIED = 0 - LOAD_BALANCER_TYPE_EXTERNAL = 1 - LOAD_BALANCER_TYPE_INTERNAL = 2 - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - load_balancer_type = proto.Field( - proto.ENUM, - number=3, - enum=LoadBalancerType, - ) - - -class ConfigConnectorConfig(proto.Message): - r"""Configuration options for the Config Connector add-on. - Attributes: - enabled (bool): - Whether Config Connector is enabled for this - cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class MasterAuthorizedNetworksConfig(proto.Message): - r"""Configuration options for the master authorized networks - feature. Enabled master authorized networks will disallow all - external traffic to access Kubernetes master through HTTPS - except traffic from the given CIDR blocks, Google Compute Engine - Public IPs and Google Prod IPs. - - Attributes: - enabled (bool): - Whether or not master authorized networks is - enabled.
- cidr_blocks (Sequence[google.container_v1.types.MasterAuthorizedNetworksConfig.CidrBlock]): - cidr_blocks define up to 50 external networks that could - access Kubernetes master through HTTPS. - """ - - class CidrBlock(proto.Message): - r"""CidrBlock contains an optional name and one CIDR block. - Attributes: - display_name (str): - display_name is an optional field for users to identify CIDR - blocks. - cidr_block (str): - cidr_block must be specified in CIDR notation. - """ - - display_name = proto.Field( - proto.STRING, - number=1, - ) - cidr_block = proto.Field( - proto.STRING, - number=2, - ) - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - cidr_blocks = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=CidrBlock, - ) - - -class LegacyAbac(proto.Message): - r"""Configuration for the legacy Attribute Based Access Control - authorization mode. - - Attributes: - enabled (bool): - Whether the ABAC authorizer is enabled for - this cluster. When enabled, identities in the - system, including service accounts, nodes, and - controllers, will have statically granted - permissions beyond those provided by the RBAC - configuration or IAM. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class NetworkPolicy(proto.Message): - r"""Configuration options for the NetworkPolicy feature. - https://kubernetes.io/docs/concepts/services- - networking/networkpolicies/ - - Attributes: - provider (google.container_v1.types.NetworkPolicy.Provider): - The selected network policy provider. - enabled (bool): - Whether network policy is enabled on the - cluster. - """ - class Provider(proto.Enum): - r"""Allowed Network Policy providers.""" - PROVIDER_UNSPECIFIED = 0 - CALICO = 1 - - provider = proto.Field( - proto.ENUM, - number=1, - enum=Provider, - ) - enabled = proto.Field( - proto.BOOL, - number=2, - ) - - -class BinaryAuthorization(proto.Message): - r"""Configuration for Binary Authorization. - Attributes: - enabled (bool): - Enable Binary Authorization for this cluster. - If enabled, all container images will be - validated by Binary Authorization. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class IPAllocationPolicy(proto.Message): - r"""Configuration for controlling how IPs are allocated in the - cluster. - - Attributes: - use_ip_aliases (bool): - Whether alias IPs will be used for pod IPs in the cluster. - This is used in conjunction with use_routes. It cannot be - true if use_routes is true. If both use_ip_aliases and - use_routes are false, then the server picks the default IP - allocation mode - create_subnetwork (bool): - Whether a new subnetwork will be created automatically for - the cluster. - - This field is only applicable when ``use_ip_aliases`` is - true. - subnetwork_name (str): - A custom subnetwork name to be used if ``create_subnetwork`` - is true. If this field is empty, then an automatic name will - be chosen for the new subnetwork. - cluster_ipv4_cidr (str): - This field is deprecated, use cluster_ipv4_cidr_block. - node_ipv4_cidr (str): - This field is deprecated, use node_ipv4_cidr_block. - services_ipv4_cidr (str): - This field is deprecated, use services_ipv4_cidr_block. - cluster_secondary_range_name (str): - The name of the secondary range to be used for the cluster - CIDR block. The secondary range will be used for pod IP - addresses. This must be an existing secondary range - associated with the cluster subnetwork. - - This field is only applicable with use_ip_aliases is true - and create_subnetwork is false. 
- services_secondary_range_name (str): - The name of the secondary range to be used as for the - services CIDR block. The secondary range will be used for - service ClusterIPs. This must be an existing secondary range - associated with the cluster subnetwork. - - This field is only applicable with use_ip_aliases is true - and create_subnetwork is false. - cluster_ipv4_cidr_block (str): - The IP address range for the cluster pod IPs. If this field - is set, then ``cluster.cluster_ipv4_cidr`` must be left - blank. - - This field is only applicable when ``use_ip_aliases`` is - true. - - Set to blank to have a range chosen with the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - node_ipv4_cidr_block (str): - The IP address range of the instance IPs in this cluster. - - This is applicable only if ``create_subnetwork`` is true. - - Set to blank to have a range chosen with the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - services_ipv4_cidr_block (str): - The IP address range of the services IPs in this cluster. If - blank, a range will be automatically chosen with the default - size. - - This field is only applicable when ``use_ip_aliases`` is - true. - - Set to blank to have a range chosen with the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - tpu_ipv4_cidr_block (str): - The IP address range of the Cloud TPUs in this cluster. If - unspecified, a range will be automatically chosen with the - default size. - - This field is only applicable when ``use_ip_aliases`` is - true. - - If unspecified, the range will use the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - use_routes (bool): - Whether routes will be used for pod IPs in the cluster. This - is used in conjunction with use_ip_aliases. It cannot be - true if use_ip_aliases is true. 
If both use_ip_aliases and - use_routes are false, then the server picks the default IP - allocation mode - """ - - use_ip_aliases = proto.Field( - proto.BOOL, - number=1, - ) - create_subnetwork = proto.Field( - proto.BOOL, - number=2, - ) - subnetwork_name = proto.Field( - proto.STRING, - number=3, - ) - cluster_ipv4_cidr = proto.Field( - proto.STRING, - number=4, - ) - node_ipv4_cidr = proto.Field( - proto.STRING, - number=5, - ) - services_ipv4_cidr = proto.Field( - proto.STRING, - number=6, - ) - cluster_secondary_range_name = proto.Field( - proto.STRING, - number=7, - ) - services_secondary_range_name = proto.Field( - proto.STRING, - number=8, - ) - cluster_ipv4_cidr_block = proto.Field( - proto.STRING, - number=9, - ) - node_ipv4_cidr_block = proto.Field( - proto.STRING, - number=10, - ) - services_ipv4_cidr_block = proto.Field( - proto.STRING, - number=11, - ) - tpu_ipv4_cidr_block = proto.Field( - proto.STRING, - number=13, - ) - use_routes = proto.Field( - proto.BOOL, - number=15, - ) - - -class Cluster(proto.Message): - r"""A Google Kubernetes Engine cluster. - Attributes: - name (str): - The name of this cluster. The name must be unique within - this project and location (e.g. zone or region), and can be - up to 40 characters with the following restrictions: - - - Lowercase letters, numbers, and hyphens only. - - Must start with a letter. - - Must end with a number or a letter. - description (str): - An optional description of this cluster. - initial_node_count (int): - The number of nodes to create in this cluster. You must - ensure that your Compute Engine `resource - quota `__ is - sufficient for this number of instances. You must also have - available firewall and routes quota. For requests, this - field should only be used in lieu of a "node_pool" object, - since this configuration (along with the "node_config") will - be used to create a "NodePool" object with an auto-generated - name. Do not use this and a node_pool at the same time. - - This field is deprecated, use node_pool.initial_node_count - instead. - node_config (google.container_v1.types.NodeConfig): - Parameters used in creating the cluster's nodes. For - requests, this field should only be used in lieu of a - "node_pool" object, since this configuration (along with the - "initial_node_count") will be used to create a "NodePool" - object with an auto-generated name. Do not use this and a - node_pool at the same time. For responses, this field will - be populated with the node configuration of the first node - pool. (For configuration of each node pool, see - ``node_pool.config``) - - If unspecified, the defaults are used. This field is - deprecated, use node_pool.config instead. - master_auth (google.container_v1.types.MasterAuth): - The authentication information for accessing the master - endpoint. If unspecified, the defaults are used: For - clusters before v1.12, if master_auth is unspecified, - ``username`` will be set to "admin", a random password will - be generated, and a client certificate will be issued. - logging_service (str): - The logging service the cluster should use to write logs. - Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud Logging - service with a Kubernetes-native resource model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. 
- - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be used - for GKE 1.14+ or ``logging.googleapis.com`` for earlier - versions. - monitoring_service (str): - The monitoring service the cluster should use to write - metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE 1.15). - - ``none`` - No metrics will be exported from the cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``monitoring.googleapis.com`` for - earlier versions. - network (str): - The name of the Google Compute Engine - `network `__ - to which the cluster is connected. If left unspecified, the - ``default`` network will be used. - cluster_ipv4_cidr (str): - The IP address range of the container pods in this cluster, - in - `CIDR `__ - notation (e.g. ``10.96.0.0/14``). Leave blank to have one - automatically chosen or specify a ``/14`` block in - ``10.0.0.0/8``. - addons_config (google.container_v1.types.AddonsConfig): - Configurations for the various addons - available to run in the cluster. - subnetwork (str): - The name of the Google Compute Engine - `subnetwork `__ - to which the cluster is connected. - node_pools (Sequence[google.container_v1.types.NodePool]): - The node pools associated with this cluster. This field - should not be set if "node_config" or "initial_node_count" - are specified. - locations (Sequence[str]): - The list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. - - This field provides a default value if - `NodePool.Locations `__ - are not specified during node pool creation. - - Warning: changing cluster locations will update the - `NodePool.Locations `__ - of all node pools and will result in nodes being added - and/or removed. - enable_kubernetes_alpha (bool): - Kubernetes alpha features are enabled on this - cluster. This includes alpha API groups (e.g. - v1alpha1) and features that may not be - production ready in the kubernetes version of - the master and nodes. The cluster has no SLA for - uptime and master/node upgrades are disabled. - Alpha enabled clusters are automatically deleted - thirty days after creation. - resource_labels (Sequence[google.container_v1.types.Cluster.ResourceLabelsEntry]): - The resource labels for the cluster to use to - annotate any related Google Compute Engine - resources. - label_fingerprint (str): - The fingerprint of the set of labels for this - cluster. - legacy_abac (google.container_v1.types.LegacyAbac): - Configuration for the legacy ABAC - authorization mode. - network_policy (google.container_v1.types.NetworkPolicy): - Configuration options for the NetworkPolicy - feature. - ip_allocation_policy (google.container_v1.types.IPAllocationPolicy): - Configuration for cluster IP allocation. - master_authorized_networks_config (google.container_v1.types.MasterAuthorizedNetworksConfig): - The configuration options for master - authorized networks feature. - maintenance_policy (google.container_v1.types.MaintenancePolicy): - Configure the maintenance policy for this - cluster. - binary_authorization (google.container_v1.types.BinaryAuthorization): - Configuration for Binary Authorization. - autoscaling (google.container_v1.types.ClusterAutoscaling): - Cluster-level autoscaling configuration. 
- network_config (google.container_v1.types.NetworkConfig): - Configuration for cluster networking. - default_max_pods_constraint (google.container_v1.types.MaxPodsConstraint): - The default constraint on the maximum number - of pods that can be run simultaneously on a node - in the node pool of this cluster. Only honored - if cluster created with IP Alias support. - resource_usage_export_config (google.container_v1.types.ResourceUsageExportConfig): - Configuration for exporting resource usages. - Resource usage export is disabled when this - config is unspecified. - authenticator_groups_config (google.container_v1.types.AuthenticatorGroupsConfig): - Configuration controlling RBAC group - membership information. - private_cluster_config (google.container_v1.types.PrivateClusterConfig): - Configuration for private cluster. - database_encryption (google.container_v1.types.DatabaseEncryption): - Configuration of etcd encryption. - vertical_pod_autoscaling (google.container_v1.types.VerticalPodAutoscaling): - Cluster-level Vertical Pod Autoscaling - configuration. - shielded_nodes (google.container_v1.types.ShieldedNodes): - Shielded Nodes configuration. - release_channel (google.container_v1.types.ReleaseChannel): - Release channel configuration. - workload_identity_config (google.container_v1.types.WorkloadIdentityConfig): - Configuration for the use of Kubernetes - Service Accounts in GCP IAM policies. - self_link (str): - [Output only] Server-defined URL for the resource. - zone (str): - [Output only] The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field is deprecated, use - location instead. - endpoint (str): - [Output only] The IP address of this cluster's master - endpoint. The endpoint can be accessed from the internet at - ``https://username:password@endpoint/``. - - See the ``masterAuth`` property of this resource for - username and password information. - initial_cluster_version (str): - The initial Kubernetes version for this - cluster. Valid versions are those found in - validMasterVersions returned by getServerConfig. - The version can be upgraded over time; such - upgrades are reflected in currentMasterVersion - and currentNodeVersion. - - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "","-": picks the default - Kubernetes version - current_master_version (str): - [Output only] The current software version of the master - endpoint. - current_node_version (str): - [Output only] Deprecated, use - `NodePools.version `__ - instead. The current version of the node software - components. If they are currently at multiple versions - because they're in the process of being upgraded, this - reflects the minimum version of all nodes. - create_time (str): - [Output only] The time the cluster was created, in - `RFC3339 `__ text - format. - status (google.container_v1.types.Cluster.Status): - [Output only] The current status of this cluster. - status_message (str): - [Output only] Deprecated. Use conditions instead. Additional - information about the current status of this cluster, if - available. - node_ipv4_cidr_size (int): - [Output only] The size of the address space on each node for - hosting containers. 
This is provisioned from within the - ``container_ipv4_cidr`` range. This field will only be set - when cluster is in route-based network mode. - services_ipv4_cidr (str): - [Output only] The IP address range of the Kubernetes - services in this cluster, in - `CIDR `__ - notation (e.g. ``1.2.3.4/29``). Service addresses are - typically put in the last ``/16`` from the container CIDR. - instance_group_urls (Sequence[str]): - Deprecated. Use node_pools.instance_group_urls. - current_node_count (int): - [Output only] The number of nodes currently in the cluster. - Deprecated. Call Kubernetes API directly to retrieve node - information. - expire_time (str): - [Output only] The time the cluster will be automatically - deleted in - `RFC3339 `__ text - format. - location (str): - [Output only] The name of the Google Compute Engine - `zone `__ - or - `region `__ - in which the cluster resides. - enable_tpu (bool): - Enable the ability to use Cloud TPUs in this - cluster. - tpu_ipv4_cidr_block (str): - [Output only] The IP address range of the Cloud TPUs in this - cluster, in - `CIDR `__ - notation (e.g. ``1.2.3.4/29``). - conditions (Sequence[google.container_v1.types.StatusCondition]): - Which conditions caused the current cluster - state. - """ - class Status(proto.Enum): - r"""The current status of the cluster.""" - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RECONCILING = 3 - STOPPING = 4 - ERROR = 5 - DEGRADED = 6 - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - initial_node_count = proto.Field( - proto.INT32, - number=3, - ) - node_config = proto.Field( - proto.MESSAGE, - number=4, - message='NodeConfig', - ) - master_auth = proto.Field( - proto.MESSAGE, - number=5, - message='MasterAuth', - ) - logging_service = proto.Field( - proto.STRING, - number=6, - ) - monitoring_service = proto.Field( - proto.STRING, - number=7, - ) - network = proto.Field( - proto.STRING, - number=8, - ) - cluster_ipv4_cidr = proto.Field( - proto.STRING, - number=9, - ) - addons_config = proto.Field( - proto.MESSAGE, - number=10, - message='AddonsConfig', - ) - subnetwork = proto.Field( - proto.STRING, - number=11, - ) - node_pools = proto.RepeatedField( - proto.MESSAGE, - number=12, - message='NodePool', - ) - locations = proto.RepeatedField( - proto.STRING, - number=13, - ) - enable_kubernetes_alpha = proto.Field( - proto.BOOL, - number=14, - ) - resource_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=15, - ) - label_fingerprint = proto.Field( - proto.STRING, - number=16, - ) - legacy_abac = proto.Field( - proto.MESSAGE, - number=18, - message='LegacyAbac', - ) - network_policy = proto.Field( - proto.MESSAGE, - number=19, - message='NetworkPolicy', - ) - ip_allocation_policy = proto.Field( - proto.MESSAGE, - number=20, - message='IPAllocationPolicy', - ) - master_authorized_networks_config = proto.Field( - proto.MESSAGE, - number=22, - message='MasterAuthorizedNetworksConfig', - ) - maintenance_policy = proto.Field( - proto.MESSAGE, - number=23, - message='MaintenancePolicy', - ) - binary_authorization = proto.Field( - proto.MESSAGE, - number=24, - message='BinaryAuthorization', - ) - autoscaling = proto.Field( - proto.MESSAGE, - number=26, - message='ClusterAutoscaling', - ) - network_config = proto.Field( - proto.MESSAGE, - number=27, - message='NetworkConfig', - ) - default_max_pods_constraint = proto.Field( - proto.MESSAGE, - number=30, - message='MaxPodsConstraint', - ) - resource_usage_export_config = 
proto.Field( - proto.MESSAGE, - number=33, - message='ResourceUsageExportConfig', - ) - authenticator_groups_config = proto.Field( - proto.MESSAGE, - number=34, - message='AuthenticatorGroupsConfig', - ) - private_cluster_config = proto.Field( - proto.MESSAGE, - number=37, - message='PrivateClusterConfig', - ) - database_encryption = proto.Field( - proto.MESSAGE, - number=38, - message='DatabaseEncryption', - ) - vertical_pod_autoscaling = proto.Field( - proto.MESSAGE, - number=39, - message='VerticalPodAutoscaling', - ) - shielded_nodes = proto.Field( - proto.MESSAGE, - number=40, - message='ShieldedNodes', - ) - release_channel = proto.Field( - proto.MESSAGE, - number=41, - message='ReleaseChannel', - ) - workload_identity_config = proto.Field( - proto.MESSAGE, - number=43, - message='WorkloadIdentityConfig', - ) - self_link = proto.Field( - proto.STRING, - number=100, - ) - zone = proto.Field( - proto.STRING, - number=101, - ) - endpoint = proto.Field( - proto.STRING, - number=102, - ) - initial_cluster_version = proto.Field( - proto.STRING, - number=103, - ) - current_master_version = proto.Field( - proto.STRING, - number=104, - ) - current_node_version = proto.Field( - proto.STRING, - number=105, - ) - create_time = proto.Field( - proto.STRING, - number=106, - ) - status = proto.Field( - proto.ENUM, - number=107, - enum=Status, - ) - status_message = proto.Field( - proto.STRING, - number=108, - ) - node_ipv4_cidr_size = proto.Field( - proto.INT32, - number=109, - ) - services_ipv4_cidr = proto.Field( - proto.STRING, - number=110, - ) - instance_group_urls = proto.RepeatedField( - proto.STRING, - number=111, - ) - current_node_count = proto.Field( - proto.INT32, - number=112, - ) - expire_time = proto.Field( - proto.STRING, - number=113, - ) - location = proto.Field( - proto.STRING, - number=114, - ) - enable_tpu = proto.Field( - proto.BOOL, - number=115, - ) - tpu_ipv4_cidr_block = proto.Field( - proto.STRING, - number=116, - ) - conditions = proto.RepeatedField( - proto.MESSAGE, - number=118, - message='StatusCondition', - ) - - -class ClusterUpdate(proto.Message): - r"""ClusterUpdate describes an update to the cluster. Exactly one - update can be applied to a cluster with each request, so at most - one field can be provided. - - Attributes: - desired_node_version (str): - The Kubernetes version to change the nodes to - (typically an upgrade). - - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the Kubernetes - master version - desired_monitoring_service (str): - The monitoring service the cluster should use to write - metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE 1.15). - - ``none`` - No metrics will be exported from the cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``monitoring.googleapis.com`` for - earlier versions. 
- desired_addons_config (google.container_v1.types.AddonsConfig): - Configurations for the various addons - available to run in the cluster. - desired_node_pool_id (str): - The node pool to be upgraded. This field is mandatory if - "desired_node_version", "desired_image_family" or - "desired_node_pool_autoscaling" is specified and there is - more than one node pool on the cluster. - desired_image_type (str): - The desired image type for the node pool. NOTE: Set the - "desired_node_pool" field as well. - desired_database_encryption (google.container_v1.types.DatabaseEncryption): - Configuration of etcd encryption. - desired_workload_identity_config (google.container_v1.types.WorkloadIdentityConfig): - Configuration for Workload Identity. - desired_shielded_nodes (google.container_v1.types.ShieldedNodes): - Configuration for Shielded Nodes. - desired_node_pool_autoscaling (google.container_v1.types.NodePoolAutoscaling): - Autoscaler configuration for the node pool specified in - desired_node_pool_id. If there is only one pool in the - cluster and desired_node_pool_id is not provided then the - change applies to that single node pool. - desired_locations (Sequence[str]): - The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. - - This list must always include the cluster's primary zone. - - Warning: changing cluster locations will update the - locations of all node pools and will result in nodes being - added and/or removed. - desired_master_authorized_networks_config (google.container_v1.types.MasterAuthorizedNetworksConfig): - The desired configuration options for master - authorized networks feature. - desired_cluster_autoscaling (google.container_v1.types.ClusterAutoscaling): - Cluster-level autoscaling configuration. - desired_binary_authorization (google.container_v1.types.BinaryAuthorization): - The desired configuration options for the - Binary Authorization feature. - desired_logging_service (str): - The logging service the cluster should use to write logs. - Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud Logging - service with a Kubernetes-native resource model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be used - for GKE 1.14+ or ``logging.googleapis.com`` for earlier - versions. - desired_resource_usage_export_config (google.container_v1.types.ResourceUsageExportConfig): - The desired configuration for exporting - resource usage. - desired_vertical_pod_autoscaling (google.container_v1.types.VerticalPodAutoscaling): - Cluster-level Vertical Pod Autoscaling - configuration. - desired_private_cluster_config (google.container_v1.types.PrivateClusterConfig): - The desired private cluster configuration. - desired_intra_node_visibility_config (google.container_v1.types.IntraNodeVisibilityConfig): - The desired config of Intra-node visibility. - desired_default_snat_status (google.container_v1.types.DefaultSnatStatus): - The desired status of whether to disable - default sNAT for this cluster. - desired_release_channel (google.container_v1.types.ReleaseChannel): - The desired release channel configuration. - desired_authenticator_groups_config (google.container_v1.types.AuthenticatorGroupsConfig): - The desired authenticator groups config for - the cluster. 
- desired_master_version (str): - The Kubernetes version to change the master - to. - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the default - Kubernetes version - """ - - desired_node_version = proto.Field( - proto.STRING, - number=4, - ) - desired_monitoring_service = proto.Field( - proto.STRING, - number=5, - ) - desired_addons_config = proto.Field( - proto.MESSAGE, - number=6, - message='AddonsConfig', - ) - desired_node_pool_id = proto.Field( - proto.STRING, - number=7, - ) - desired_image_type = proto.Field( - proto.STRING, - number=8, - ) - desired_database_encryption = proto.Field( - proto.MESSAGE, - number=46, - message='DatabaseEncryption', - ) - desired_workload_identity_config = proto.Field( - proto.MESSAGE, - number=47, - message='WorkloadIdentityConfig', - ) - desired_shielded_nodes = proto.Field( - proto.MESSAGE, - number=48, - message='ShieldedNodes', - ) - desired_node_pool_autoscaling = proto.Field( - proto.MESSAGE, - number=9, - message='NodePoolAutoscaling', - ) - desired_locations = proto.RepeatedField( - proto.STRING, - number=10, - ) - desired_master_authorized_networks_config = proto.Field( - proto.MESSAGE, - number=12, - message='MasterAuthorizedNetworksConfig', - ) - desired_cluster_autoscaling = proto.Field( - proto.MESSAGE, - number=15, - message='ClusterAutoscaling', - ) - desired_binary_authorization = proto.Field( - proto.MESSAGE, - number=16, - message='BinaryAuthorization', - ) - desired_logging_service = proto.Field( - proto.STRING, - number=19, - ) - desired_resource_usage_export_config = proto.Field( - proto.MESSAGE, - number=21, - message='ResourceUsageExportConfig', - ) - desired_vertical_pod_autoscaling = proto.Field( - proto.MESSAGE, - number=22, - message='VerticalPodAutoscaling', - ) - desired_private_cluster_config = proto.Field( - proto.MESSAGE, - number=25, - message='PrivateClusterConfig', - ) - desired_intra_node_visibility_config = proto.Field( - proto.MESSAGE, - number=26, - message='IntraNodeVisibilityConfig', - ) - desired_default_snat_status = proto.Field( - proto.MESSAGE, - number=28, - message='DefaultSnatStatus', - ) - desired_release_channel = proto.Field( - proto.MESSAGE, - number=31, - message='ReleaseChannel', - ) - desired_authenticator_groups_config = proto.Field( - proto.MESSAGE, - number=63, - message='AuthenticatorGroupsConfig', - ) - desired_master_version = proto.Field( - proto.STRING, - number=100, - ) - - -class Operation(proto.Message): - r"""This operation resource represents operations that may have - happened or are happening on the cluster. All fields are output - only. - - Attributes: - name (str): - The server-assigned ID for the operation. - zone (str): - The name of the Google Compute Engine - `zone `__ - in which the operation is taking place. This field is - deprecated, use location instead. - operation_type (google.container_v1.types.Operation.Type): - The operation type. - status (google.container_v1.types.Operation.Status): - The current status of the operation. - detail (str): - Detailed operation progress, if available. - status_message (str): - Output only. If an error has occurred, a - textual description of the error. 
- self_link (str): - Server-defined URL for the resource. - target_link (str): - Server-defined URL for the target of the - operation. - location (str): - [Output only] The name of the Google Compute Engine - `zone `__ - or - `region `__ - in which the cluster resides. - start_time (str): - [Output only] The time the operation started, in - `RFC3339 `__ text - format. - end_time (str): - [Output only] The time the operation completed, in - `RFC3339 `__ text - format. - progress (google.container_v1.types.OperationProgress): - Output only. [Output only] Progress information for an - operation. - cluster_conditions (Sequence[google.container_v1.types.StatusCondition]): - Which conditions caused the current cluster - state. - nodepool_conditions (Sequence[google.container_v1.types.StatusCondition]): - Which conditions caused the current node pool - state. - """ - class Status(proto.Enum): - r"""Current status of the operation.""" - STATUS_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - ABORTING = 4 - - class Type(proto.Enum): - r"""Operation type.""" - TYPE_UNSPECIFIED = 0 - CREATE_CLUSTER = 1 - DELETE_CLUSTER = 2 - UPGRADE_MASTER = 3 - UPGRADE_NODES = 4 - REPAIR_CLUSTER = 5 - UPDATE_CLUSTER = 6 - CREATE_NODE_POOL = 7 - DELETE_NODE_POOL = 8 - SET_NODE_POOL_MANAGEMENT = 9 - AUTO_REPAIR_NODES = 10 - AUTO_UPGRADE_NODES = 11 - SET_LABELS = 12 - SET_MASTER_AUTH = 13 - SET_NODE_POOL_SIZE = 14 - SET_NETWORK_POLICY = 15 - SET_MAINTENANCE_POLICY = 16 - - name = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - operation_type = proto.Field( - proto.ENUM, - number=3, - enum=Type, - ) - status = proto.Field( - proto.ENUM, - number=4, - enum=Status, - ) - detail = proto.Field( - proto.STRING, - number=8, - ) - status_message = proto.Field( - proto.STRING, - number=5, - ) - self_link = proto.Field( - proto.STRING, - number=6, - ) - target_link = proto.Field( - proto.STRING, - number=7, - ) - location = proto.Field( - proto.STRING, - number=9, - ) - start_time = proto.Field( - proto.STRING, - number=10, - ) - end_time = proto.Field( - proto.STRING, - number=11, - ) - progress = proto.Field( - proto.MESSAGE, - number=12, - message='OperationProgress', - ) - cluster_conditions = proto.RepeatedField( - proto.MESSAGE, - number=13, - message='StatusCondition', - ) - nodepool_conditions = proto.RepeatedField( - proto.MESSAGE, - number=14, - message='StatusCondition', - ) - - -class OperationProgress(proto.Message): - r"""Information about operation (or operation stage) progress. - Attributes: - name (str): - A non-parameterized string describing an - operation stage. Unset for single-stage - operations. - status (google.container_v1.types.Operation.Status): - Status of an operation stage. - Unset for single-stage operations. - metrics (Sequence[google.container_v1.types.OperationProgress.Metric]): - Progress metric bundle, for example: metrics: [{name: "nodes - done", int_value: 15}, {name: "nodes total", int_value: 32}] - or metrics: [{name: "progress", double_value: 0.56}, {name: - "progress scale", double_value: 1.0}] - stages (Sequence[google.container_v1.types.OperationProgress]): - Substages of an operation or a stage. - """ - - class Metric(proto.Message): - r"""Progress metric is (string, int|float|string) pair. - Attributes: - name (str): - Required. Metric name, e.g., "nodes total", - "percent done". - int_value (int): - For metrics with integer value. - double_value (float): - For metrics with floating point value. 
- string_value (str): - For metrics with custom values (ratios, - visual progress, etc.). - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - int_value = proto.Field( - proto.INT64, - number=2, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=3, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=4, - oneof='value', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - status = proto.Field( - proto.ENUM, - number=2, - enum='Operation.Status', - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Metric, - ) - stages = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='OperationProgress', - ) - - -class CreateClusterRequest(proto.Message): - r"""CreateClusterRequest creates a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the parent field. - cluster (google.container_v1.types.Cluster): - Required. A `cluster - resource `__ - parent (str): - The parent (project and location) where the cluster will be - created. Specified in the format ``projects/*/locations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster = proto.Field( - proto.MESSAGE, - number=3, - message='Cluster', - ) - parent = proto.Field( - proto.STRING, - number=5, - ) - - -class GetClusterRequest(proto.Message): - r"""GetClusterRequest gets the settings of a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - retrieve. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster) of the cluster to - retrieve. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class UpdateClusterRequest(proto.Message): - r"""UpdateClusterRequest updates the settings of a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - update (google.container_v1.types.ClusterUpdate): - Required. A description of the update. - name (str): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. 
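For orientation, a minimal usage sketch for this request shape through the generated client. It assumes the staged ``google.container_v1`` package is importable as generated and uses placeholder project/cluster names; per the ``ClusterUpdate`` notes above, at most one ``desired_*`` field is set per call.

    from google.container_v1 import ClusterManagerClient, ClusterUpdate

    client = ClusterManagerClient()
    # Exactly one update can be applied per request, so set a single field.
    operation = client.update_cluster(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/my-cluster",
            "update": ClusterUpdate(desired_node_version="latest"),
        }
    )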
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - update = proto.Field( - proto.MESSAGE, - number=4, - message='ClusterUpdate', - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class UpdateNodePoolRequest(proto.Message): - r"""UpdateNodePoolRequests update a node pool's image and/or - version. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Deprecated. The name of the node pool to - upgrade. This field has been deprecated and - replaced by the name field. - node_version (str): - Required. The Kubernetes version to change - the nodes to (typically an upgrade). - - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the Kubernetes - master version - image_type (str): - Required. The desired image type for the node - pool. - name (str): - The name (project, location, cluster, node pool) of the node - pool to update. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - locations (Sequence[str]): - The desired list of Google Compute Engine - `zones `__ - in which the node pool's nodes should be located. Changing - the locations for a node pool will result in nodes being - either created or removed from the node pool, depending on - whether locations are being added or removed. - workload_metadata_config (google.container_v1.types.WorkloadMetadataConfig): - The desired workload metadata config for the - node pool. - upgrade_settings (google.container_v1.types.NodePool.UpgradeSettings): - Upgrade settings control disruption and speed - of the upgrade. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - node_version = proto.Field( - proto.STRING, - number=5, - ) - image_type = proto.Field( - proto.STRING, - number=6, - ) - name = proto.Field( - proto.STRING, - number=8, - ) - locations = proto.RepeatedField( - proto.STRING, - number=13, - ) - workload_metadata_config = proto.Field( - proto.MESSAGE, - number=14, - message='WorkloadMetadataConfig', - ) - upgrade_settings = proto.Field( - proto.MESSAGE, - number=15, - message='NodePool.UpgradeSettings', - ) - - -class SetNodePoolAutoscalingRequest(proto.Message): - r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of - a node pool. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. 
The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Deprecated. The name of the node pool to - upgrade. This field has been deprecated and - replaced by the name field. - autoscaling (google.container_v1.types.NodePoolAutoscaling): - Required. Autoscaling configuration for the - node pool. - name (str): - The name (project, location, cluster, node pool) of the node - pool to set autoscaler settings. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - autoscaling = proto.Field( - proto.MESSAGE, - number=5, - message='NodePoolAutoscaling', - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetLoggingServiceRequest(proto.Message): - r"""SetLoggingServiceRequest sets the logging service of a - cluster. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - logging_service (str): - Required. The logging service the cluster should use to - write logs. Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud Logging - service with a Kubernetes-native resource model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be used - for GKE 1.14+ or ``logging.googleapis.com`` for earlier - versions. - name (str): - The name (project, location, cluster) of the cluster to set - logging. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - logging_service = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class SetMonitoringServiceRequest(proto.Message): - r"""SetMonitoringServiceRequest sets the monitoring service of a - cluster. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - monitoring_service (str): - Required. The monitoring service the cluster should use to - write metrics. 
Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE 1.15). - - ``none`` - No metrics will be exported from the cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``monitoring.googleapis.com`` for - earlier versions. - name (str): - The name (project, location, cluster) of the cluster to set - monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - monitoring_service = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetAddonsConfigRequest(proto.Message): - r"""SetAddonsConfigRequest sets the addons associated with the - cluster. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - addons_config (google.container_v1.types.AddonsConfig): - Required. The desired configurations for the - various addons available to run in the cluster. - name (str): - The name (project, location, cluster) of the cluster to set - addons. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - addons_config = proto.Field( - proto.MESSAGE, - number=4, - message='AddonsConfig', - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetLocationsRequest(proto.Message): - r"""SetLocationsRequest sets the locations of the cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - locations (Sequence[str]): - Required. The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. Changing the - locations a cluster is in will result in nodes being either - created or removed from the cluster, depending on whether - locations are being added or removed. - - This list must always include the cluster's primary zone. - name (str): - The name (project, location, cluster) of the cluster to set - locations. Specified in the format - ``projects/*/locations/*/clusters/*``. 
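A hedged sketch of issuing this request via the generated ``set_locations`` method; the resource names are placeholders, and the locations list must keep the cluster's primary zone, as noted above.

    # "us-central1-a" is assumed to be the cluster's primary zone here.
    operation = client.set_locations(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/my-cluster",
            "locations": ["us-central1-a", "us-central1-b"],
        }
    )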
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - locations = proto.RepeatedField( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class UpdateMasterRequest(proto.Message): - r"""UpdateMasterRequest updates the master of the cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - master_version (str): - Required. The Kubernetes version to change - the master to. - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the default - Kubernetes version - name (str): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - master_version = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class SetMasterAuthRequest(proto.Message): - r"""SetMasterAuthRequest updates the admin password of a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - upgrade. This field has been deprecated and - replaced by the name field. - action (google.container_v1.types.SetMasterAuthRequest.Action): - Required. The exact form of action to be - taken on the master auth. - update (google.container_v1.types.MasterAuth): - Required. A description of the update. - name (str): - The name (project, location, cluster) of the cluster to set - auth. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - class Action(proto.Enum): - r"""Operation type: what type update to perform.""" - UNKNOWN = 0 - SET_PASSWORD = 1 - GENERATE_PASSWORD = 2 - SET_USERNAME = 3 - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - action = proto.Field( - proto.ENUM, - number=4, - enum=Action, - ) - update = proto.Field( - proto.MESSAGE, - number=5, - message='MasterAuth', - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class DeleteClusterRequest(proto.Message): - r"""DeleteClusterRequest deletes a cluster. - Attributes: - project_id (str): - Deprecated. 
The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - delete. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster) of the cluster to - delete. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=4, - ) - - -class ListClustersRequest(proto.Message): - r"""ListClustersRequest lists clusters. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides, or "-" for all zones. This - field has been deprecated and replaced by the parent field. - parent (str): - The parent (project and location) where the clusters will be - listed. Specified in the format ``projects/*/locations/*``. - Location "-" matches all zones and all regions. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - parent = proto.Field( - proto.STRING, - number=4, - ) - - -class ListClustersResponse(proto.Message): - r"""ListClustersResponse is the result of ListClustersRequest. - Attributes: - clusters (Sequence[google.container_v1.types.Cluster]): - A list of clusters in the project in the - specified zone, or across all ones. - missing_zones (Sequence[str]): - If any zones are listed here, the list of - clusters returned may be missing those zones. - """ - - clusters = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Cluster', - ) - missing_zones = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class GetOperationRequest(proto.Message): - r"""GetOperationRequest gets a single operation. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - operation_id (str): - Deprecated. The server-assigned ``name`` of the operation. - This field has been deprecated and replaced by the name - field. - name (str): - The name (project, location, operation id) of the operation - to get. Specified in the format - ``projects/*/locations/*/operations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - operation_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class ListOperationsRequest(proto.Message): - r"""ListOperationsRequest lists operations. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Deprecated. 
The name of the Google Compute Engine - `zone `__ - to return operations for, or ``-`` for all zones. This field - has been deprecated and replaced by the parent field. - parent (str): - The parent (project and location) where the operations will - be listed. Specified in the format - ``projects/*/locations/*``. Location "-" matches all zones - and all regions. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - parent = proto.Field( - proto.STRING, - number=4, - ) - - -class CancelOperationRequest(proto.Message): - r"""CancelOperationRequest cancels a single operation. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the operation resides. This field has been - deprecated and replaced by the name field. - operation_id (str): - Deprecated. The server-assigned ``name`` of the operation. - This field has been deprecated and replaced by the name - field. - name (str): - The name (project, location, operation id) of the operation - to cancel. Specified in the format - ``projects/*/locations/*/operations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - operation_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=4, - ) - - -class ListOperationsResponse(proto.Message): - r"""ListOperationsResponse is the result of - ListOperationsRequest. - - Attributes: - operations (Sequence[google.container_v1.types.Operation]): - A list of operations in the project in the - specified zone. - missing_zones (Sequence[str]): - If any zones are listed here, the list of - operations returned may be missing the - operations from those zones. - """ - - operations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Operation', - ) - missing_zones = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class GetServerConfigRequest(proto.Message): - r"""Gets the current Kubernetes Engine service configuration. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - to return operations for. This field has been deprecated and - replaced by the name field. - name (str): - The name (project and location) of the server config to get, - specified in the format ``projects/*/locations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - name = proto.Field( - proto.STRING, - number=4, - ) - - -class ServerConfig(proto.Message): - r"""Kubernetes Engine service configuration. - Attributes: - default_cluster_version (str): - Version of Kubernetes the service deploys by - default. - valid_node_versions (Sequence[str]): - List of valid node upgrade target versions, - in descending order. - default_image_type (str): - Default image type. - valid_image_types (Sequence[str]): - List of valid image types. - valid_master_versions (Sequence[str]): - List of valid master versions, in descending - order. - channels (Sequence[google.container_v1.types.ServerConfig.ReleaseChannelConfig]): - List of release channel configurations. 
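A small sketch of reading this configuration back, assuming a ``ClusterManagerClient`` as above and a placeholder project/location:

    server_config = client.get_server_config(
        request={"name": "projects/my-project/locations/us-central1-a"}
    )
    print(server_config.default_cluster_version)
    for channel_config in server_config.channels:
        # Each entry pairs a release channel with its default version.
        print(channel_config.channel, channel_config.default_version)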
- """ - - class ReleaseChannelConfig(proto.Message): - r"""ReleaseChannelConfig exposes configuration for a release - channel. - - Attributes: - channel (google.container_v1.types.ReleaseChannel.Channel): - The release channel this configuration - applies to. - default_version (str): - The default version for newly created - clusters on the channel. - valid_versions (Sequence[str]): - List of valid versions for the channel. - """ - - channel = proto.Field( - proto.ENUM, - number=1, - enum='ReleaseChannel.Channel', - ) - default_version = proto.Field( - proto.STRING, - number=2, - ) - valid_versions = proto.RepeatedField( - proto.STRING, - number=4, - ) - - default_cluster_version = proto.Field( - proto.STRING, - number=1, - ) - valid_node_versions = proto.RepeatedField( - proto.STRING, - number=3, - ) - default_image_type = proto.Field( - proto.STRING, - number=4, - ) - valid_image_types = proto.RepeatedField( - proto.STRING, - number=5, - ) - valid_master_versions = proto.RepeatedField( - proto.STRING, - number=6, - ) - channels = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=ReleaseChannelConfig, - ) - - -class CreateNodePoolRequest(proto.Message): - r"""CreateNodePoolRequest creates a node pool for a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the parent field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the parent field. - node_pool (google.container_v1.types.NodePool): - Required. The node pool to create. - parent (str): - The parent (project, location, cluster id) where the node - pool will be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool = proto.Field( - proto.MESSAGE, - number=4, - message='NodePool', - ) - parent = proto.Field( - proto.STRING, - number=6, - ) - - -class DeleteNodePoolRequest(proto.Message): - r"""DeleteNodePoolRequest deletes a node pool for a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the name field. - node_pool_id (str): - Deprecated. The name of the node pool to - delete. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. 
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class ListNodePoolsRequest(proto.Message): - r"""ListNodePoolsRequest lists the node pool(s) for a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the parent field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the parent field. - parent (str): - The parent (project, location, cluster id) where the node - pools will be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - parent = proto.Field( - proto.STRING, - number=5, - ) - - -class GetNodePoolRequest(proto.Message): - r"""GetNodePoolRequest retrieves a node pool for a cluster. - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the name field. - node_pool_id (str): - Deprecated. The name of the node pool. - This field has been deprecated and replaced by - the name field. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class NodePool(proto.Message): - r"""NodePool contains the name and configuration for a cluster's - node pool. Node pools are a set of nodes (i.e. VM's), with a - common configuration and specification, under the control of the - cluster master. They may have a set of Kubernetes labels applied - to them, which may be used to reference them during pod - scheduling. They may also be resized up or down, to accommodate - the workload. - - Attributes: - name (str): - The name of the node pool. - config (google.container_v1.types.NodeConfig): - The node configuration of the pool. - initial_node_count (int): - The initial node count for the pool. You must ensure that - your Compute Engine `resource - quota `__ is - sufficient for this number of instances. You must also have - available firewall and routes quota. - locations (Sequence[str]): - The list of Google Compute Engine - `zones `__ - in which the NodePool's nodes should be located. 
- - If this value is unspecified during node pool creation, the - `Cluster.Locations `__ - value will be used, instead. - - Warning: changing node pool locations will result in nodes - being added and/or removed. - self_link (str): - [Output only] Server-defined URL for the resource. - version (str): - The version of Kubernetes running on this node. - instance_group_urls (Sequence[str]): - [Output only] The resource URLs of the `managed instance - groups `__ - associated with this node pool. - status (google.container_v1.types.NodePool.Status): - [Output only] The status of the nodes in this pool instance. - status_message (str): - [Output only] Deprecated. Use conditions instead. Additional - information about the current status of this node pool - instance, if available. - autoscaling (google.container_v1.types.NodePoolAutoscaling): - Autoscaler configuration for this NodePool. - Autoscaler is enabled only if a valid - configuration is present. - management (google.container_v1.types.NodeManagement): - NodeManagement configuration for this - NodePool. - max_pods_constraint (google.container_v1.types.MaxPodsConstraint): - The constraint on the maximum number of pods - that can be run simultaneously on a node in the - node pool. - conditions (Sequence[google.container_v1.types.StatusCondition]): - Which conditions caused the current node pool - state. - pod_ipv4_cidr_size (int): - [Output only] The pod CIDR block size per node in this node - pool. - upgrade_settings (google.container_v1.types.NodePool.UpgradeSettings): - Upgrade settings control disruption and speed - of the upgrade. - """ - class Status(proto.Enum): - r"""The current status of the node pool instance.""" - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RUNNING_WITH_ERROR = 3 - RECONCILING = 4 - STOPPING = 5 - ERROR = 6 - - class UpgradeSettings(proto.Message): - r"""These upgrade settings control the level of parallelism and - the level of disruption caused by an upgrade. - - maxUnavailable controls the number of nodes that can be - simultaneously unavailable. - - maxSurge controls the number of additional nodes that can be - added to the node pool temporarily for the time of the upgrade - to increase the number of available nodes. - - (maxUnavailable + maxSurge) determines the level of parallelism - (how many nodes are being upgraded at the same time). - - Note: upgrades inevitably introduce some disruption since - workloads need to be moved from old nodes to new, upgraded ones. - Even if maxUnavailable=0, this holds true. (Disruption stays - within the limits of PodDisruptionBudget, if it is configured.) - - Consider a hypothetical node pool with 5 nodes having - maxSurge=2, maxUnavailable=1. This means the upgrade process - upgrades 3 nodes simultaneously. It creates 2 additional - (upgraded) nodes, then it brings down 3 old (not yet upgraded) - nodes at the same time. This ensures that there are always at - least 4 nodes available. - - Attributes: - max_surge (int): - The maximum number of nodes that can be - created beyond the current size of the node pool - during the upgrade process. - max_unavailable (int): - The maximum number of nodes that can be - simultaneously unavailable during the upgrade - process. A node is considered available if its - status is Ready.
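The worked example above translates directly into the message; a sketch for the hypothetical 5-node pool:

    from google.container_v1 import NodePool

    # maxSurge=2, maxUnavailable=1: 2 + 1 = 3 nodes upgrade in parallel,
    # and at least 5 - 1 = 4 of the 5 nodes stay available throughout.
    settings = NodePool.UpgradeSettings(max_surge=2, max_unavailable=1)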
- """ - - max_surge = proto.Field( - proto.INT32, - number=1, - ) - max_unavailable = proto.Field( - proto.INT32, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - config = proto.Field( - proto.MESSAGE, - number=2, - message='NodeConfig', - ) - initial_node_count = proto.Field( - proto.INT32, - number=3, - ) - locations = proto.RepeatedField( - proto.STRING, - number=13, - ) - self_link = proto.Field( - proto.STRING, - number=100, - ) - version = proto.Field( - proto.STRING, - number=101, - ) - instance_group_urls = proto.RepeatedField( - proto.STRING, - number=102, - ) - status = proto.Field( - proto.ENUM, - number=103, - enum=Status, - ) - status_message = proto.Field( - proto.STRING, - number=104, - ) - autoscaling = proto.Field( - proto.MESSAGE, - number=4, - message='NodePoolAutoscaling', - ) - management = proto.Field( - proto.MESSAGE, - number=5, - message='NodeManagement', - ) - max_pods_constraint = proto.Field( - proto.MESSAGE, - number=6, - message='MaxPodsConstraint', - ) - conditions = proto.RepeatedField( - proto.MESSAGE, - number=105, - message='StatusCondition', - ) - pod_ipv4_cidr_size = proto.Field( - proto.INT32, - number=7, - ) - upgrade_settings = proto.Field( - proto.MESSAGE, - number=107, - message=UpgradeSettings, - ) - - -class NodeManagement(proto.Message): - r"""NodeManagement defines the set of node management services - turned on for the node pool. - - Attributes: - auto_upgrade (bool): - A flag that specifies whether node auto- - pgrade is enabled for the node pool. If enabled, - node auto-upgrade helps keep the nodes in your - node pool up to date with the latest release - version of Kubernetes. - auto_repair (bool): - A flag that specifies whether the node auto- - epair is enabled for the node pool. If enabled, - the nodes in this node pool will be monitored - and, if they fail health checks too many times, - an automatic repair action will be triggered. - upgrade_options (google.container_v1.types.AutoUpgradeOptions): - Specifies the Auto Upgrade knobs for the node - pool. - """ - - auto_upgrade = proto.Field( - proto.BOOL, - number=1, - ) - auto_repair = proto.Field( - proto.BOOL, - number=2, - ) - upgrade_options = proto.Field( - proto.MESSAGE, - number=10, - message='AutoUpgradeOptions', - ) - - -class AutoUpgradeOptions(proto.Message): - r"""AutoUpgradeOptions defines the set of options for the user to - control how the Auto Upgrades will proceed. - - Attributes: - auto_upgrade_start_time (str): - [Output only] This field is set when upgrades are about to - commence with the approximate start time for the upgrades, - in `RFC3339 `__ text - format. - description (str): - [Output only] This field is set when upgrades are about to - commence with the description of the upgrade. - """ - - auto_upgrade_start_time = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - - -class MaintenancePolicy(proto.Message): - r"""MaintenancePolicy defines the maintenance policy to be used - for the cluster. - - Attributes: - window (google.container_v1.types.MaintenanceWindow): - Specifies the maintenance window in which - maintenance may be performed. - resource_version (str): - A hash identifying the version of this policy, so that - updates to fields of the policy won't accidentally undo - intermediate changes (and so that users of the API unaware - of some fields won't accidentally remove other fields). 
Make - a ``get()`` request to the cluster to get the current - resource version and include it with requests to set the - policy. - """ - - window = proto.Field( - proto.MESSAGE, - number=1, - message='MaintenanceWindow', - ) - resource_version = proto.Field( - proto.STRING, - number=3, - ) - - -class MaintenanceWindow(proto.Message): - r"""MaintenanceWindow defines the maintenance window to be used - for the cluster. - - Attributes: - daily_maintenance_window (google.container_v1.types.DailyMaintenanceWindow): - DailyMaintenanceWindow specifies a daily - maintenance operation window. - recurring_window (google.container_v1.types.RecurringTimeWindow): - RecurringWindow specifies some number of - recurring time periods for maintenance to occur. - The time windows may be overlapping. If no - maintenance windows are set, maintenance can - occur at any time. - maintenance_exclusions (Sequence[google.container_v1.types.MaintenanceWindow.MaintenanceExclusionsEntry]): - Exceptions to maintenance window. Non-emergency maintenance should not occur in these - windows. - """ - - daily_maintenance_window = proto.Field( - proto.MESSAGE, - number=2, - oneof='policy', - message='DailyMaintenanceWindow', - ) - recurring_window = proto.Field( - proto.MESSAGE, - number=3, - oneof='policy', - message='RecurringTimeWindow', - ) - maintenance_exclusions = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=4, - message='TimeWindow', - ) - - -class TimeWindow(proto.Message): - r"""Represents an arbitrary window of time. - Attributes: - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time that the window first starts. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The time that the window ends. The end time - should take place after the start time. - """ - - start_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - - -class RecurringTimeWindow(proto.Message): - r"""Represents an arbitrary window of time that recurs. - Attributes: - window (google.container_v1.types.TimeWindow): - The window of the first recurrence. - recurrence (str): - An RRULE - (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for - how this window recurs. They go on for the span of time - between the start and end time. - - For example, to have something repeat every weekday, you'd - use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR`` - - To repeat some window daily (equivalent to the - DailyMaintenanceWindow): ``FREQ=DAILY`` - - For the first weekend of every month: - ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU`` - - This specifies how frequently the window starts. E.g., if you - wanted to have a 9-5 UTC-4 window every weekday, you'd use - something like: - - :: - - start time = 2019-01-01T09:00:00-0400 - end time = 2019-01-01T17:00:00-0400 - recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - - Windows can span multiple days. E.g., to make the window - encompass every weekend from midnight Saturday till the last - minute of Sunday UTC: - - :: - - start time = 2019-01-05T00:00:00Z - end time = 2019-01-07T23:59:00Z - recurrence = FREQ=WEEKLY;BYDAY=SA - - Note the start and end time's specific dates are largely - arbitrary except to specify duration of the window and when - it first starts. The FREQ values of HOURLY, MINUTELY, and - SECONDLY are not supported.
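The weekday 9-5 UTC-4 example above can be expressed as a sketch (times converted to UTC; assumes the staged package plus ``protobuf``):

    from google.container_v1 import (
        MaintenancePolicy,
        MaintenanceWindow,
        RecurringTimeWindow,
        TimeWindow,
    )
    from google.protobuf import timestamp_pb2

    start, end = timestamp_pb2.Timestamp(), timestamp_pb2.Timestamp()
    start.FromJsonString("2019-01-01T13:00:00Z")  # 09:00 at UTC-4
    end.FromJsonString("2019-01-01T21:00:00Z")    # 17:00 at UTC-4

    policy = MaintenancePolicy(
        window=MaintenanceWindow(
            recurring_window=RecurringTimeWindow(
                window=TimeWindow(start_time=start, end_time=end),
                recurrence="FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR",
            )
        )
    )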
- """ - - window = proto.Field( - proto.MESSAGE, - number=1, - message='TimeWindow', - ) - recurrence = proto.Field( - proto.STRING, - number=2, - ) - - -class DailyMaintenanceWindow(proto.Message): - r"""Time window specified for daily maintenance operations. - Attributes: - start_time (str): - Time within the maintenance window to start the maintenance - operations. Time format should be in - `RFC3339 `__ format - "HH:MM", where HH : [00-23] and MM : [00-59] GMT. - duration (str): - [Output only] Duration of the time window, automatically - chosen to be smallest possible in the given scenario. - Duration will be in - `RFC3339 `__ format - "PTnHnMnS". - """ - - start_time = proto.Field( - proto.STRING, - number=2, - ) - duration = proto.Field( - proto.STRING, - number=3, - ) - - -class SetNodePoolManagementRequest(proto.Message): - r"""SetNodePoolManagementRequest sets the node management - properties of a node pool. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - update. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Deprecated. The name of the node pool to - update. This field has been deprecated and - replaced by the name field. - management (google.container_v1.types.NodeManagement): - Required. NodeManagement configuration for - the node pool. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to set management properties. Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - management = proto.Field( - proto.MESSAGE, - number=5, - message='NodeManagement', - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class SetNodePoolSizeRequest(proto.Message): - r"""SetNodePoolSizeRequest sets the size a node - pool. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - update. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Deprecated. The name of the node pool to - update. This field has been deprecated and - replaced by the name field. - node_count (int): - Required. The desired node count for the - pool. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to set size. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. 
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - node_count = proto.Field( - proto.INT32, - number=5, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class RollbackNodePoolUpgradeRequest(proto.Message): - r"""RollbackNodePoolUpgradeRequest rollbacks the previously - Aborted or Failed NodePool upgrade. This will be an no-op if the - last upgrade successfully completed. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - rollback. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Deprecated. The name of the node pool to - rollback. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster, node pool id) of the - node poll to rollback upgrade. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class ListNodePoolsResponse(proto.Message): - r"""ListNodePoolsResponse is the result of ListNodePoolsRequest. - Attributes: - node_pools (Sequence[google.container_v1.types.NodePool]): - A list of node pools for a cluster. - """ - - node_pools = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='NodePool', - ) - - -class ClusterAutoscaling(proto.Message): - r"""ClusterAutoscaling contains global, per-cluster information - required by Cluster Autoscaler to automatically adjust the size - of the cluster and create/delete - node pools based on the current needs. - - Attributes: - enable_node_autoprovisioning (bool): - Enables automatic node pool creation and - deletion. - resource_limits (Sequence[google.container_v1.types.ResourceLimit]): - Contains global constraints regarding minimum - and maximum amount of resources in the cluster. - autoprovisioning_node_pool_defaults (google.container_v1.types.AutoprovisioningNodePoolDefaults): - AutoprovisioningNodePoolDefaults contains - defaults for a node pool created by NAP. - autoprovisioning_locations (Sequence[str]): - The list of Google Compute Engine - `zones `__ - in which the NodePool's nodes can be created by NAP. - """ - - enable_node_autoprovisioning = proto.Field( - proto.BOOL, - number=1, - ) - resource_limits = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ResourceLimit', - ) - autoprovisioning_node_pool_defaults = proto.Field( - proto.MESSAGE, - number=4, - message='AutoprovisioningNodePoolDefaults', - ) - autoprovisioning_locations = proto.RepeatedField( - proto.STRING, - number=5, - ) - - -class AutoprovisioningNodePoolDefaults(proto.Message): - r"""AutoprovisioningNodePoolDefaults contains defaults for a node - pool created by NAP. 
- - Attributes: - oauth_scopes (Sequence[str]): - Scopes that are used by NAP when creating - node pools. - service_account (str): - The Google Cloud Platform Service Account to - be used by the node VMs. - upgrade_settings (google.container_v1.types.NodePool.UpgradeSettings): - Specifies the upgrade settings for NAP - created node pools - management (google.container_v1.types.NodeManagement): - Specifies the node management options for NAP - created node-pools. - min_cpu_platform (str): - Minimum CPU platform to be used for NAP created node pools. - The instance may be scheduled on the specified or newer CPU - platform. Applicable values are the friendly names of CPU - platforms, such as minCpuPlatform: Intel Haswell or - minCpuPlatform: Intel Sandy Bridge. For more information, - read `how to specify min CPU - platform `__ - To unset the min cpu platform field pass "automatic" as - field value. - disk_size_gb (int): - Size of the disk attached to each node, - specified in GB. The smallest allowed disk size - is 10GB. - If unspecified, the default disk size is 100GB. - disk_type (str): - Type of the disk attached to each node (e.g. - 'pd-standard', 'pd-ssd' or 'pd-balanced') - - If unspecified, the default disk type is 'pd- - standard' - shielded_instance_config (google.container_v1.types.ShieldedInstanceConfig): - Shielded Instance options. - boot_disk_kms_key (str): - The Customer Managed Encryption Key used to encrypt the boot - disk attached to each node in the node pool. This should be - of the form - projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. - For more information about protecting resources with Cloud - KMS Keys please see: - https://cloud.google.com/compute/docs/disks/customer-managed-encryption - image_type (str): - The image type to use for NAP created node. - """ - - oauth_scopes = proto.RepeatedField( - proto.STRING, - number=1, - ) - service_account = proto.Field( - proto.STRING, - number=2, - ) - upgrade_settings = proto.Field( - proto.MESSAGE, - number=3, - message='NodePool.UpgradeSettings', - ) - management = proto.Field( - proto.MESSAGE, - number=4, - message='NodeManagement', - ) - min_cpu_platform = proto.Field( - proto.STRING, - number=5, - ) - disk_size_gb = proto.Field( - proto.INT32, - number=6, - ) - disk_type = proto.Field( - proto.STRING, - number=7, - ) - shielded_instance_config = proto.Field( - proto.MESSAGE, - number=8, - message='ShieldedInstanceConfig', - ) - boot_disk_kms_key = proto.Field( - proto.STRING, - number=9, - ) - image_type = proto.Field( - proto.STRING, - number=10, - ) - - -class ResourceLimit(proto.Message): - r"""Contains information about amount of some resource in the - cluster. For memory, value should be in GB. - - Attributes: - resource_type (str): - Resource name "cpu", "memory" or gpu-specific - string. - minimum (int): - Minimum amount of the resource in the - cluster. - maximum (int): - Maximum amount of the resource in the - cluster. - """ - - resource_type = proto.Field( - proto.STRING, - number=1, - ) - minimum = proto.Field( - proto.INT64, - number=2, - ) - maximum = proto.Field( - proto.INT64, - number=3, - ) - - -class NodePoolAutoscaling(proto.Message): - r"""NodePoolAutoscaling contains information required by cluster - autoscaler to adjust the size of the node pool to the current - cluster usage. - - Attributes: - enabled (bool): - Is autoscaling enabled for this node pool. - min_node_count (int): - Minimum number of nodes in the NodePool. 
Must be >= 1 and <= - max_node_count. - max_node_count (int): - Maximum number of nodes in the NodePool. Must be >= - min_node_count. There has to be enough quota to scale up the - cluster. - autoprovisioned (bool): - Whether this node pool can be deleted automatically. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - min_node_count = proto.Field( - proto.INT32, - number=2, - ) - max_node_count = proto.Field( - proto.INT32, - number=3, - ) - autoprovisioned = proto.Field( - proto.BOOL, - number=4, - ) - - -class SetLabelsRequest(proto.Message): - r"""SetLabelsRequest sets the Google Cloud Platform labels on a - Google Container Engine cluster, which will in turn set them for - Google Compute Engine resources used by that cluster. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the name field. - resource_labels (Sequence[google.container_v1.types.SetLabelsRequest.ResourceLabelsEntry]): - Required. The labels to set for that cluster. - label_fingerprint (str): - Required. The fingerprint of the previous set of labels for - this resource, used to detect conflicts. The fingerprint is - initially generated by Kubernetes Engine and changes after - every request to modify or update labels. You must always - provide an up-to-date fingerprint hash when updating or - changing labels. Make a ``get()`` request to the resource to - get the latest fingerprint. - name (str): - The name (project, location, cluster id) of the cluster to - set labels. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - resource_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - label_fingerprint = proto.Field( - proto.STRING, - number=5, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class SetLegacyAbacRequest(proto.Message): - r"""SetLegacyAbacRequest enables or disables the ABAC - authorization mechanism for a cluster. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster to - update. This field has been deprecated and - replaced by the name field. - enabled (bool): - Required. Whether ABAC authorization will be - enabled in the cluster. - name (str): - The name (project, location, cluster id) of the cluster to - set legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``.
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - enabled = proto.Field( - proto.BOOL, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class StartIPRotationRequest(proto.Message): - r"""StartIPRotationRequest creates a new IP for the cluster and - then performs a node upgrade on each node pool to point to the - new IP. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the name field. - name (str): - The name (project, location, cluster id) of the cluster to - start IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - rotate_credentials (bool): - Whether to rotate credentials during IP - rotation. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - rotate_credentials = proto.Field( - proto.BOOL, - number=7, - ) - - -class CompleteIPRotationRequest(proto.Message): - r"""CompleteIPRotationRequest moves the cluster master back into - single-IP mode. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the name field. - name (str): - The name (project, location, cluster id) of the cluster to - complete IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class AcceleratorConfig(proto.Message): - r"""AcceleratorConfig represents a Hardware Accelerator request. - Attributes: - accelerator_count (int): - The number of the accelerator cards exposed - to an instance. - accelerator_type (str): - The accelerator type resource name. List of supported - accelerators - `here `__ - """ - - accelerator_count = proto.Field( - proto.INT64, - number=1, - ) - accelerator_type = proto.Field( - proto.STRING, - number=2, - ) - - -class WorkloadMetadataConfig(proto.Message): - r"""WorkloadMetadataConfig defines the metadata configuration to - expose to workloads on the node pool. - - Attributes: - mode (google.container_v1.types.WorkloadMetadataConfig.Mode): - Mode is the configuration for how to expose - metadata to workloads running on the node pool. - """ - class Mode(proto.Enum): - r"""Mode is the configuration for how to expose metadata to - workloads running on the node. 
- """ - MODE_UNSPECIFIED = 0 - GCE_METADATA = 1 - GKE_METADATA = 2 - - mode = proto.Field( - proto.ENUM, - number=2, - enum=Mode, - ) - - -class SetNetworkPolicyRequest(proto.Message): - r"""SetNetworkPolicyRequest enables/disables network policy for a - cluster. - - Attributes: - project_id (str): - Deprecated. The Google Developers Console `project ID or - project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Deprecated. The name of the cluster. - This field has been deprecated and replaced by - the name field. - network_policy (google.container_v1.types.NetworkPolicy): - Required. Configuration options for the - NetworkPolicy feature. - name (str): - The name (project, location, cluster id) of the cluster to - set networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - network_policy = proto.Field( - proto.MESSAGE, - number=4, - message='NetworkPolicy', - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetMaintenancePolicyRequest(proto.Message): - r"""SetMaintenancePolicyRequest sets the maintenance policy for a - cluster. - - Attributes: - project_id (str): - Required. The Google Developers Console `project ID or - project - number `__. - zone (str): - Required. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. - cluster_id (str): - Required. The name of the cluster to update. - maintenance_policy (google.container_v1.types.MaintenancePolicy): - Required. The maintenance policy to be set - for the cluster. An empty field clears the - existing maintenance policy. - name (str): - The name (project, location, cluster id) of the cluster to - set maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - maintenance_policy = proto.Field( - proto.MESSAGE, - number=4, - message='MaintenancePolicy', - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class StatusCondition(proto.Message): - r"""StatusCondition describes why a cluster or a node pool has a - certain status (e.g., ERROR or DEGRADED). - - Attributes: - code (google.container_v1.types.StatusCondition.Code): - Machine-friendly representation of the - condition - message (str): - Human-friendly representation of the - condition - """ - class Code(proto.Enum): - r"""Code for each condition""" - UNKNOWN = 0 - GCE_STOCKOUT = 1 - GKE_SERVICE_ACCOUNT_DELETED = 2 - GCE_QUOTA_EXCEEDED = 3 - SET_BY_OPERATOR = 4 - CLOUD_KMS_KEY_ERROR = 7 - - code = proto.Field( - proto.ENUM, - number=1, - enum=Code, - ) - message = proto.Field( - proto.STRING, - number=2, - ) - - -class NetworkConfig(proto.Message): - r"""NetworkConfig reports the relative names of network & - subnetwork. - - Attributes: - network (str): - Output only. The relative name of the Google Compute Engine - [network]`google.container.v1.NetworkConfig.network `__ - to which the cluster is connected. 
Example: - projects/my-project/global/networks/my-network - subnetwork (str): - Output only. The relative name of the Google Compute Engine - `subnetwork `__ - to which the cluster is connected. Example: - projects/my-project/regions/us-central1/subnetworks/my-subnet - enable_intra_node_visibility (bool): - Whether intra-node visibility is enabled for - this cluster. This makes same-node pod-to-pod - traffic visible to the VPC network. - default_snat_status (google.container_v1.types.DefaultSnatStatus): - Whether the cluster disables default in-node sNAT rules. - In-node sNAT rules will be disabled when default_snat_status - is disabled. When disabled is set to false, default IP - masquerade rules will be applied to the nodes to prevent - sNAT on cluster internal traffic. - """ - - network = proto.Field( - proto.STRING, - number=1, - ) - subnetwork = proto.Field( - proto.STRING, - number=2, - ) - enable_intra_node_visibility = proto.Field( - proto.BOOL, - number=5, - ) - default_snat_status = proto.Field( - proto.MESSAGE, - number=7, - message='DefaultSnatStatus', - ) - - -class GetOpenIDConfigRequest(proto.Message): - r"""GetOpenIDConfigRequest gets the OIDC discovery document for - the cluster. See the OpenID Connect Discovery 1.0 specification - for details. - - Attributes: - parent (str): - The cluster (project, location, cluster id) to get the - discovery document for. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class GetOpenIDConfigResponse(proto.Message): - r"""GetOpenIDConfigResponse is an OIDC discovery document for the - cluster. See the OpenID Connect Discovery 1.0 specification for - details. - - Attributes: - issuer (str): - OIDC Issuer. - jwks_uri (str): - JSON Web Key URI. - response_types_supported (Sequence[str]): - Supported response types. - subject_types_supported (Sequence[str]): - Supported subject types. - id_token_signing_alg_values_supported (Sequence[str]): - Supported ID token signing algorithms. - claims_supported (Sequence[str]): - Supported claims. - grant_types (Sequence[str]): - Supported grant types. - """ - - issuer = proto.Field( - proto.STRING, - number=1, - ) - jwks_uri = proto.Field( - proto.STRING, - number=2, - ) - response_types_supported = proto.RepeatedField( - proto.STRING, - number=3, - ) - subject_types_supported = proto.RepeatedField( - proto.STRING, - number=4, - ) - id_token_signing_alg_values_supported = proto.RepeatedField( - proto.STRING, - number=5, - ) - claims_supported = proto.RepeatedField( - proto.STRING, - number=6, - ) - grant_types = proto.RepeatedField( - proto.STRING, - number=7, - ) - - -class GetJSONWebKeysRequest(proto.Message): - r"""GetJSONWebKeysRequest gets the public component of the keys used by - the cluster to sign token requests. This will be the jwks_uri for - the discovery document returned by getOpenIDConfig. See the OpenID - Connect Discovery 1.0 specification for details. - - Attributes: - parent (str): - The cluster (project, location, cluster id) to get keys for. - Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class Jwk(proto.Message): - r"""Jwk is a JSON Web Key as specified in RFC 7517. - Attributes: - kty (str): - Key Type. - alg (str): - Algorithm. - use (str): - Permitted uses for the public keys. - kid (str): - Key ID. - n (str): - Used for RSA keys. - e (str): - Used for RSA keys. - x (str): - Used for ECDSA keys.
- y (str): - Used for ECDSA keys. - crv (str): - Used for ECDSA keys. - """ - - kty = proto.Field( - proto.STRING, - number=1, - ) - alg = proto.Field( - proto.STRING, - number=2, - ) - use = proto.Field( - proto.STRING, - number=3, - ) - kid = proto.Field( - proto.STRING, - number=4, - ) - n = proto.Field( - proto.STRING, - number=5, - ) - e = proto.Field( - proto.STRING, - number=6, - ) - x = proto.Field( - proto.STRING, - number=7, - ) - y = proto.Field( - proto.STRING, - number=8, - ) - crv = proto.Field( - proto.STRING, - number=9, - ) - - -class GetJSONWebKeysResponse(proto.Message): - r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as - specified in RFC 7517. - - Attributes: - keys (Sequence[google.container_v1.types.Jwk]): - The public component of the keys used by the - cluster to sign token requests. - """ - - keys = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Jwk', - ) - - -class ReleaseChannel(proto.Message): - r"""ReleaseChannel indicates which release channel a cluster is - subscribed to. Release channels are arranged in order of risk. - When a cluster is subscribed to a release channel, Google - maintains both the master version and the node version. Node - auto-upgrade defaults to true and cannot be disabled. - - Attributes: - channel (google.container_v1.types.ReleaseChannel.Channel): - channel specifies which release channel the - cluster is subscribed to. - """ - class Channel(proto.Enum): - r"""Possible values for 'channel'.""" - UNSPECIFIED = 0 - RAPID = 1 - REGULAR = 2 - STABLE = 3 - - channel = proto.Field( - proto.ENUM, - number=1, - enum=Channel, - ) - - -class IntraNodeVisibilityConfig(proto.Message): - r"""IntraNodeVisibilityConfig contains the desired config of the - intra-node visibility on this cluster. - - Attributes: - enabled (bool): - Enables intra-node visibility for this - cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class MaxPodsConstraint(proto.Message): - r"""Constraints applied to pods. - Attributes: - max_pods_per_node (int): - Constraint enforced on the maximum number of pods - per node. - """ - - max_pods_per_node = proto.Field( - proto.INT64, - number=1, - ) - - -class WorkloadIdentityConfig(proto.Message): - r"""Configuration for the use of Kubernetes Service Accounts in - GCP IAM policies. - - Attributes: - workload_pool (str): - The workload pool to attach all Kubernetes - service accounts to. - """ - - workload_pool = proto.Field( - proto.STRING, - number=2, - ) - - -class DatabaseEncryption(proto.Message): - r"""Configuration of etcd encryption. - Attributes: - state (google.container_v1.types.DatabaseEncryption.State): - Denotes the state of etcd encryption. - key_name (str): - Name of CloudKMS key to use for the - encryption of secrets in etcd. Ex. projects/my- - project/locations/global/keyRings/my- - ring/cryptoKeys/my-key - """ - class State(proto.Enum): - r"""State of etcd encryption.""" - UNKNOWN = 0 - ENCRYPTED = 1 - DECRYPTED = 2 - - state = proto.Field( - proto.ENUM, - number=2, - enum=State, - ) - key_name = proto.Field( - proto.STRING, - number=1, - ) - - -class ListUsableSubnetworksRequest(proto.Message): - r"""ListUsableSubnetworksRequest requests the list of usable - subnetworks available to a user for creating clusters. - - Attributes: - parent (str): - The parent project where subnetworks are usable. Specified - in the format ``projects/*``.
- filter (str): - Filtering currently only supports equality on the - networkProjectId and must be in the form: - "networkProjectId=[PROJECTID]", where ``networkProjectId`` - is the project which owns the listed subnetworks. This - defaults to the parent project ID. - page_size (int): - The max number of results per page that should be returned. - If the number of available results is larger than - ``page_size``, a ``next_page_token`` is returned which can - be used to get the next page of results in subsequent - requests. Acceptable values are 0 to 500, inclusive. - (Default: 500) - page_token (str): - Specifies a page token to use. Set this to - the nextPageToken returned by previous list - requests to get the next page of results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - - -class ListUsableSubnetworksResponse(proto.Message): - r"""ListUsableSubnetworksResponse is the response of - ListUsableSubnetworksRequest. - - Attributes: - subnetworks (Sequence[google.container_v1.types.UsableSubnetwork]): - A list of usable subnetworks in the specified - network project. - next_page_token (str): - This token allows you to get the next page of results for - list requests. If the number of results is larger than - ``page_size``, use the ``next_page_token`` as a value for - the query parameter ``page_token`` in the next request. The - value will become empty when there are no more pages. - """ - - @property - def raw_page(self): - return self - - subnetworks = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='UsableSubnetwork', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UsableSubnetworkSecondaryRange(proto.Message): - r"""Secondary IP range of a usable subnetwork. - Attributes: - range_name (str): - The name associated with this subnetwork - secondary range, used when adding an alias IP - range to a VM instance. - ip_cidr_range (str): - The range of IP addresses belonging to this - subnetwork secondary range. - status (google.container_v1.types.UsableSubnetworkSecondaryRange.Status): - This field is to determine the status of the - secondary range programmably. - """ - class Status(proto.Enum): - r"""Status shows the current usage of a secondary IP range.""" - UNKNOWN = 0 - UNUSED = 1 - IN_USE_SERVICE = 2 - IN_USE_SHAREABLE_POD = 3 - IN_USE_MANAGED_POD = 4 - - range_name = proto.Field( - proto.STRING, - number=1, - ) - ip_cidr_range = proto.Field( - proto.STRING, - number=2, - ) - status = proto.Field( - proto.ENUM, - number=3, - enum=Status, - ) - - -class UsableSubnetwork(proto.Message): - r"""UsableSubnetwork resource returns the subnetwork name, its - associated network and the primary CIDR range. - - Attributes: - subnetwork (str): - Subnetwork Name. - Example: projects/my-project/regions/us- - central1/subnetworks/my-subnet - network (str): - Network Name. - Example: projects/my-project/global/networks/my- - network - ip_cidr_range (str): - The range of internal addresses that are - owned by this subnetwork. - secondary_ip_ranges (Sequence[google.container_v1.types.UsableSubnetworkSecondaryRange]): - Secondary IP ranges. - status_message (str): - A human readable status message representing the reasons for - cases where the caller cannot use the secondary ranges under - the subnet. 
For example if the secondary_ip_ranges is empty - due to a permission issue, an insufficient permission - message will be given by status_message. - """ - - subnetwork = proto.Field( - proto.STRING, - number=1, - ) - network = proto.Field( - proto.STRING, - number=2, - ) - ip_cidr_range = proto.Field( - proto.STRING, - number=3, - ) - secondary_ip_ranges = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='UsableSubnetworkSecondaryRange', - ) - status_message = proto.Field( - proto.STRING, - number=5, - ) - - -class ResourceUsageExportConfig(proto.Message): - r"""Configuration for exporting cluster resource usages. - Attributes: - bigquery_destination (google.container_v1.types.ResourceUsageExportConfig.BigQueryDestination): - Configuration to use BigQuery as usage export - destination. - enable_network_egress_metering (bool): - Whether to enable network egress metering for - this cluster. If enabled, a daemonset will be - created in the cluster to meter network egress - traffic. - consumption_metering_config (google.container_v1.types.ResourceUsageExportConfig.ConsumptionMeteringConfig): - Configuration to enable resource consumption - metering. - """ - - class BigQueryDestination(proto.Message): - r"""Parameters for using BigQuery as the destination of resource - usage export. - - Attributes: - dataset_id (str): - The ID of a BigQuery Dataset. - """ - - dataset_id = proto.Field( - proto.STRING, - number=1, - ) - - class ConsumptionMeteringConfig(proto.Message): - r"""Parameters for controlling consumption metering. - Attributes: - enabled (bool): - Whether to enable consumption metering for - this cluster. If enabled, a second BigQuery - table will be created to hold resource - consumption records. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - bigquery_destination = proto.Field( - proto.MESSAGE, - number=1, - message=BigQueryDestination, - ) - enable_network_egress_metering = proto.Field( - proto.BOOL, - number=2, - ) - consumption_metering_config = proto.Field( - proto.MESSAGE, - number=3, - message=ConsumptionMeteringConfig, - ) - - -class VerticalPodAutoscaling(proto.Message): - r"""VerticalPodAutoscaling contains global, per-cluster - information required by Vertical Pod Autoscaler to automatically - adjust the resources of pods controlled by it. - - Attributes: - enabled (bool): - Enables vertical pod autoscaling. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class DefaultSnatStatus(proto.Message): - r"""DefaultSnatStatus contains the desired state of whether - default sNAT should be disabled on the cluster. - - Attributes: - disabled (bool): - Disables cluster default sNAT rules. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class ShieldedNodes(proto.Message): - r"""Configuration of Shielded Nodes feature. - Attributes: - enabled (bool): - Whether Shielded Nodes features are enabled - on all nodes in this cluster. 
- """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini deleted file mode 100644 index 4505b485..00000000 --- a/owl-bot-staging/v1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py deleted file mode 100644 index adbd707b..00000000 --- a/owl-bot-staging/v1/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/container_v1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py deleted file mode 100644 index 308cef05..00000000 --- a/owl-bot-staging/v1/scripts/fixup_container_v1_keywords.py +++ /dev/null @@ -1,207 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class containerCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'cancel_operation': ('project_id', 'zone', 'operation_id', 'name', ), - 'complete_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', ), - 'create_cluster': ('cluster', 'project_id', 'zone', 'parent', ), - 'create_node_pool': ('node_pool', 'project_id', 'zone', 'cluster_id', 'parent', ), - 'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), - 'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), - 'get_json_web_keys': ('parent', ), - 'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'get_operation': ('project_id', 'zone', 'operation_id', 'name', ), - 'get_server_config': ('project_id', 'zone', 'name', ), - 'list_clusters': ('project_id', 'zone', 'parent', ), - 'list_node_pools': ('project_id', 'zone', 'cluster_id', 'parent', ), - 'list_operations': ('project_id', 'zone', 'parent', ), - 'list_usable_subnetworks': ('parent', 'filter', 'page_size', 'page_token', ), - 'rollback_node_pool_upgrade': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'set_addons_config': ('addons_config', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_labels': ('resource_labels', 'label_fingerprint', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_legacy_abac': ('enabled', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_locations': ('locations', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_logging_service': ('logging_service', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_maintenance_policy': ('project_id', 'zone', 'cluster_id', 'maintenance_policy', 'name', ), - 'set_master_auth': ('action', 'update', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_monitoring_service': ('monitoring_service', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_network_policy': ('network_policy', 'project_id', 'zone', 'cluster_id', 'name', ), - 'set_node_pool_autoscaling': ('autoscaling', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'set_node_pool_management': ('management', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'set_node_pool_size': ('node_count', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ), - 'update_cluster': ('update', 'project_id', 'zone', 'cluster_id', 'name', ), - 'update_master': ('master_version', 'project_id', 'zone', 'cluster_id', 'name', ), - 'update_node_pool': ('node_version', 'image_type', 'project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', 'locations', 'workload_metadata_config', 'upgrade_settings', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API 
or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=containerCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the container client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py deleted file mode 100644 index eb059ee8..00000000 --- a/owl-bot-staging/v1/setup.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-container', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google',), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 3.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/container_v1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py b/owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py deleted file mode 100644 index 7509850a..00000000 --- a/owl-bot-staging/v1/tests/unit/gapic/container_v1/test_cluster_manager.py +++ /dev/null @@ -1,9434 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.container_v1.services.cluster_manager import ClusterManagerAsyncClient -from google.container_v1.services.cluster_manager import ClusterManagerClient -from google.container_v1.services.cluster_manager import pagers -from google.container_v1.services.cluster_manager import transports -from google.container_v1.services.cluster_manager.transports.base import _GOOGLE_AUTH_VERSION -from google.container_v1.types import cluster_service -from google.oauth2 import service_account -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert ClusterManagerClient._get_default_mtls_endpoint(None) is None - assert ClusterManagerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - ClusterManagerClient, - ClusterManagerAsyncClient, -]) -def test_cluster_manager_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'container.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.ClusterManagerGrpcTransport, "grpc"), - (transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_manager_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - ClusterManagerClient, - ClusterManagerAsyncClient, -]) -def test_cluster_manager_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'container.googleapis.com:443' - - -def test_cluster_manager_client_get_transport_class(): - transport = ClusterManagerClient.get_transport_class() - available_transports = [ - transports.ClusterManagerGrpcTransport, - ] - assert transport in available_transports - - transport = ClusterManagerClient.get_transport_class("grpc") - assert transport == transports.ClusterManagerGrpcTransport - - 
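The test_cluster_manager_client_service_account_always_use_jwt cases above pin down the behavior this change enables: a transport built from service-account credentials is created with always_use_jwt_access=True, so the credential signs its own JWT rather than exchanging itself at the OAuth token endpoint. A minimal caller-side sketch of what that means in practice; the key-file path and project name below are hypothetical placeholders, not part of this diff:

    # Sketch only: "key.json" and "my-project" are placeholder values.
    from google.oauth2 import service_account
    from google.container_v1 import ClusterManagerClient

    creds = service_account.Credentials.from_service_account_file("key.json")
    # Per the transport tests above, the gRPC transport created here is passed
    # always_use_jwt_access=True, so the service-account credential can use a
    # self-signed JWT instead of an OAuth access-token round trip.
    client = ClusterManagerClient(credentials=creds)
    response = client.list_clusters(parent="projects/my-project/locations/us-central1")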
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) -@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) -def test_cluster_manager_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) -@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_cluster_manager_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_manager_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_manager_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_cluster_manager_client_client_options_from_dict(): - with mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ClusterManagerClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_list_clusters(transport: str = 'grpc', request_type=cluster_service.ListClustersRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListClustersResponse( - missing_zones=['missing_zones_value'], - ) - response = client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.ListClustersResponse) - assert response.missing_zones == ['missing_zones_value'] - - -def test_list_clusters_from_dict(): - test_list_clusters(request_type=dict) - - -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListClustersRequest() - - -@pytest.mark.asyncio -async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListClustersRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.list_clusters),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse(
-            missing_zones=['missing_zones_value'],
-        ))
-        response = await client.list_clusters(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListClustersRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.ListClustersResponse)
-    assert response.missing_zones == ['missing_zones_value']
-
-
-@pytest.mark.asyncio
-async def test_list_clusters_async_from_dict():
-    await test_list_clusters_async(request_type=dict)
-
-
-def test_list_clusters_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListClustersRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_clusters),
-            '__call__') as call:
-        call.return_value = cluster_service.ListClustersResponse()
-        client.list_clusters(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_clusters_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListClustersRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_clusters),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse())
-        await client.list_clusters(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_clusters_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_clusters),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListClustersResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_clusters(
-            project_id='project_id_value',
-            zone='zone_value',
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
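The assertions that follow read the flattened keywords straight off args[0], because "flattened" support is nothing more than the client copying each provided keyword onto a request proto before the transport sees anything. A hypothetical condensation of that client-side step, under the module's existing imports:

from google.container_v1.types import cluster_service

def build_list_clusters_request(project_id=None, zone=None, parent=None):
    # Illustrative helper, not the generated client's actual code path.
    request = cluster_service.ListClustersRequest()
    if project_id is not None:
        request.project_id = project_id
    if zone is not None:
        request.zone = zone
    if parent is not None:
        request.parent = parent
    return request

req = build_list_clusters_request(project_id='p', zone='z', parent='projects/p/locations/z')
assert (req.project_id, req.zone, req.parent) == ('p', 'z', 'projects/p/locations/z')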
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_clusters_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_clusters(
-            cluster_service.ListClustersRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_clusters_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_clusters),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_clusters(
-            project_id='project_id_value',
-            zone='zone_value',
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_clusters_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_clusters(
-            cluster_service.ListClustersRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            parent='parent_value',
-        )
-
-
-def test_get_cluster(transport: str = 'grpc', request_type=cluster_service.GetClusterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
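The canned Cluster that follows sets close to thirty fields in one literal. That is cheap because proto-plus messages accept any subset of fields as keywords and fall back to proto3 defaults for the rest, which is also what the per-field assertions later rely on. A small demonstration, assuming only the module's cluster_service import:

from google.container_v1.types import cluster_service

c = cluster_service.Cluster(name='demo', initial_node_count=3)
assert c.name == 'demo'
assert c.description == ''       # unset string -> proto3 default
assert c.enable_tpu is False     # unset bool -> False
assert list(c.locations) == []   # unset repeated field -> empty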
- call.return_value = cluster_service.Cluster( - name='name_value', - description='description_value', - initial_node_count=1911, - logging_service='logging_service_value', - monitoring_service='monitoring_service_value', - network='network_value', - cluster_ipv4_cidr='cluster_ipv4_cidr_value', - subnetwork='subnetwork_value', - locations=['locations_value'], - enable_kubernetes_alpha=True, - label_fingerprint='label_fingerprint_value', - self_link='self_link_value', - zone='zone_value', - endpoint='endpoint_value', - initial_cluster_version='initial_cluster_version_value', - current_master_version='current_master_version_value', - current_node_version='current_node_version_value', - create_time='create_time_value', - status=cluster_service.Cluster.Status.PROVISIONING, - status_message='status_message_value', - node_ipv4_cidr_size=1955, - services_ipv4_cidr='services_ipv4_cidr_value', - instance_group_urls=['instance_group_urls_value'], - current_node_count=1936, - expire_time='expire_time_value', - location='location_value', - enable_tpu=True, - tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value', - ) - response = client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Cluster) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.initial_node_count == 1911 - assert response.logging_service == 'logging_service_value' - assert response.monitoring_service == 'monitoring_service_value' - assert response.network == 'network_value' - assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' - assert response.subnetwork == 'subnetwork_value' - assert response.locations == ['locations_value'] - assert response.enable_kubernetes_alpha is True - assert response.label_fingerprint == 'label_fingerprint_value' - assert response.self_link == 'self_link_value' - assert response.zone == 'zone_value' - assert response.endpoint == 'endpoint_value' - assert response.initial_cluster_version == 'initial_cluster_version_value' - assert response.current_master_version == 'current_master_version_value' - assert response.current_node_version == 'current_node_version_value' - assert response.create_time == 'create_time_value' - assert response.status == cluster_service.Cluster.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.node_ipv4_cidr_size == 1955 - assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.current_node_count == 1936 - assert response.expire_time == 'expire_time_value' - assert response.location == 'location_value' - assert response.enable_tpu is True - assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' - - -def test_get_cluster_from_dict(): - test_get_cluster(request_type=dict) - - -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
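The *_empty_call tests around here guard one small contract: invoking a client method with request=None and no flattened keywords must still send a default request instance rather than fail. A hypothetical reduction of the client-side guard being exercised (not the generated code itself):

from google.container_v1.types import cluster_service

def coerce(request, request_type=cluster_service.GetClusterRequest):
    # Sketch only: a missing request is replaced by a default-constructed one.
    return request_type() if request is None else request

assert coerce(None) == cluster_service.GetClusterRequest()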
-    with mock.patch.object(
-            type(client.transport.get_cluster),
-            '__call__') as call:
-        client.get_cluster()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.GetClusterRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetClusterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster(
-            name='name_value',
-            description='description_value',
-            initial_node_count=1911,
-            logging_service='logging_service_value',
-            monitoring_service='monitoring_service_value',
-            network='network_value',
-            cluster_ipv4_cidr='cluster_ipv4_cidr_value',
-            subnetwork='subnetwork_value',
-            locations=['locations_value'],
-            enable_kubernetes_alpha=True,
-            label_fingerprint='label_fingerprint_value',
-            self_link='self_link_value',
-            zone='zone_value',
-            endpoint='endpoint_value',
-            initial_cluster_version='initial_cluster_version_value',
-            current_master_version='current_master_version_value',
-            current_node_version='current_node_version_value',
-            create_time='create_time_value',
-            status=cluster_service.Cluster.Status.PROVISIONING,
-            status_message='status_message_value',
-            node_ipv4_cidr_size=1955,
-            services_ipv4_cidr='services_ipv4_cidr_value',
-            instance_group_urls=['instance_group_urls_value'],
-            current_node_count=1936,
-            expire_time='expire_time_value',
-            location='location_value',
-            enable_tpu=True,
-            tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value',
-        ))
-        response = await client.get_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.GetClusterRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Cluster) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.initial_node_count == 1911 - assert response.logging_service == 'logging_service_value' - assert response.monitoring_service == 'monitoring_service_value' - assert response.network == 'network_value' - assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' - assert response.subnetwork == 'subnetwork_value' - assert response.locations == ['locations_value'] - assert response.enable_kubernetes_alpha is True - assert response.label_fingerprint == 'label_fingerprint_value' - assert response.self_link == 'self_link_value' - assert response.zone == 'zone_value' - assert response.endpoint == 'endpoint_value' - assert response.initial_cluster_version == 'initial_cluster_version_value' - assert response.current_master_version == 'current_master_version_value' - assert response.current_node_version == 'current_node_version_value' - assert response.create_time == 'create_time_value' - assert response.status == cluster_service.Cluster.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.node_ipv4_cidr_size == 1955 - assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.current_node_count == 1936 - assert response.expire_time == 'expire_time_value' - assert response.location == 'location_value' - assert response.enable_tpu is True - assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' - - -@pytest.mark.asyncio -async def test_get_cluster_async_from_dict(): - await test_get_cluster_async(request_type=dict) - - -def test_get_cluster_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - call.return_value = cluster_service.Cluster() - client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_cluster_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster()) - await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
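The ('x-goog-request-params', 'name=name/value') tuple that these field-header tests look for is how a GAPIC client tells the backend which resource a request targets, derived from the fields named in the method's routing configuration. Roughly what the header construction amounts to, as a hypothetical stand-in for google.api_core's gapic_v1 routing-header helper:

from urllib.parse import quote

def routing_header(**fields):
    # Illustrative only; real clients build this via gapic_v1.routing_header.
    value = '&'.join(f'{k}={quote(str(v), safe="/")}' for k, v in fields.items())
    return ('x-goog-request-params', value)

assert routing_header(name='name/value') == ('x-goog-request-params', 'name=name/value')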
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_cluster_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Cluster()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].name == 'name_value'
-
-
-def test_get_cluster_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_cluster(
-            cluster_service.GetClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_cluster_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_cluster_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_cluster(
-            cluster_service.GetClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            name='name_value',
-        )
-
-
-def test_create_cluster(transport: str = 'grpc', request_type=cluster_service.CreateClusterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
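The "everything is optional in proto3" comment deserves a one-line demonstration: every field of these request messages is optional at the serialization layer, so a default-constructed request is a valid, zero-byte message on the wire (server-side validation is a separate matter). A sketch assuming proto-plus's class-level serialize helper:

from google.container_v1.types import cluster_service

req = cluster_service.CreateClusterRequest()
# An entirely unset proto3 message serializes to zero bytes.
assert cluster_service.CreateClusterRequest.serialize(req) == b''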
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.create_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CreateClusterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_create_cluster_from_dict():
-    test_create_cluster(request_type=dict)
-
-
-def test_create_cluster_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_cluster),
-            '__call__') as call:
-        client.create_cluster()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CreateClusterRequest()
-
-
-@pytest.mark.asyncio
-async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateClusterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.create_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
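The recurring unpacking idiom that follows the comment above, `_, args, _ = call.mock_calls[0]`, works because each entry in a mock's mock_calls list is a (name, args, kwargs) triple; args[0] is always the request message in these generated tests. A standalone illustration:

from unittest import mock

m = mock.Mock()
m('request-object', metadata=(('k', 'v'),))
# Each recorded call unpacks into its name, positional args, and kwargs.
name, args, kwargs = m.mock_calls[0]
assert args[0] == 'request-object'
assert kwargs['metadata'] == (('k', 'v'),)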
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_create_cluster_async_from_dict(): - await test_create_cluster_async(request_type=dict) - - -def test_create_cluster_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CreateClusterRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_cluster_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CreateClusterRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_cluster_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
-        client.create_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster=cluster_service.Cluster(name='name_value'),
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster == cluster_service.Cluster(name='name_value')
-        assert args[0].parent == 'parent_value'
-
-
-def test_create_cluster_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.create_cluster(
-            cluster_service.CreateClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster=cluster_service.Cluster(name='name_value'),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_cluster_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.create_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.create_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster=cluster_service.Cluster(name='name_value'),
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster == cluster_service.Cluster(name='name_value')
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_create_cluster_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.create_cluster(
-            cluster_service.CreateClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster=cluster_service.Cluster(name='name_value'),
-            parent='parent_value',
-        )
-
-
-def test_update_cluster(transport: str = 'grpc', request_type=cluster_service.UpdateClusterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
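Worth noting about the canned Operation being designated here: ClusterManager's mutating RPCs return the GKE-specific cluster_service.Operation message directly rather than a long-running-operations future, so real callers poll it themselves. A hypothetical polling loop (helper name, interval, and the name-based GetOperationRequest usage are illustrative assumptions, not the library's own helper):

import time

from google.container_v1.types import cluster_service

def wait_for(client, op, interval=5.0):
    # Sketch only: poll until the server marks the operation DONE.
    while op.status != cluster_service.Operation.Status.DONE:
        time.sleep(interval)
        op = client.get_operation(cluster_service.GetOperationRequest(name=op.name))
    return op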
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.update_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateClusterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_update_cluster_from_dict():
-    test_update_cluster(request_type=dict)
-
-
-def test_update_cluster_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_cluster),
-            '__call__') as call:
-        client.update_cluster()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateClusterRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateClusterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.update_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateClusterRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_update_cluster_async_from_dict(): - await test_update_cluster_async(request_type=dict) - - -def test_update_cluster_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_cluster_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_update_cluster_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_cluster( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
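A brief note on the update message asserted just below: ClusterUpdate is a sparse "desired state" message, so only the desired_* fields that are actually set get applied, and a one-field update can be compared by value against a freshly built one. Assuming only the module's cluster_service import (version string illustrative):

from google.container_v1.types import cluster_service

update = cluster_service.ClusterUpdate(desired_node_version='1.20.8-gke.900')
# Whole-message equality holds because all other fields are at their defaults.
assert update == cluster_service.ClusterUpdate(desired_node_version='1.20.8-gke.900')
assert update.desired_image_type == ''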
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value')
-        assert args[0].name == 'name_value'
-
-
-def test_update_cluster_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_cluster(
-            cluster_service.UpdateClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_cluster_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value')
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_update_cluster_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_cluster(
-            cluster_service.UpdateClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'),
-            name='name_value',
-        )
-
-
-def test_update_node_pool(transport: str = 'grpc', request_type=cluster_service.UpdateNodePoolRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_node_pool),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.update_node_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateNodePoolRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_update_node_pool_from_dict():
-    test_update_node_pool(request_type=dict)
-
-
-def test_update_node_pool_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_node_pool),
-            '__call__') as call:
-        client.update_node_pool()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateNodePoolRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateNodePoolRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_node_pool),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.update_node_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateNodePoolRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_update_node_pool_async_from_dict(): - await test_update_node_pool_async(request_type=dict) - - -def test_update_node_pool_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.update_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_node_pool_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.update_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_node_pool_autoscaling(transport: str = 'grpc', request_type=cluster_service.SetNodePoolAutoscalingRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - # Designate an appropriate return value for the call. 
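By this point the pattern is plainly mechanical: the generator emits one near-identical arrange-act-assert block per RPC, and that unrolled form is kept because the file is regenerated rather than hand-edited. For contrast, a hand-maintained suite could fold the Operation-returning methods into a single parametrized test; a sketch under the module's existing imports (method list illustrative):

from unittest import mock

import pytest
from google.auth import credentials as ga_credentials
from google.container_v1.services.cluster_manager import ClusterManagerClient
from google.container_v1.types import cluster_service

@pytest.mark.parametrize("method,request_type", [
    ("set_node_pool_autoscaling", cluster_service.SetNodePoolAutoscalingRequest),
    ("set_logging_service", cluster_service.SetLoggingServiceRequest),
])
def test_operation_rpcs(method, request_type):
    client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(getattr(client.transport, method)), '__call__') as call:
        call.return_value = cluster_service.Operation(name='op')
        response = getattr(client, method)(request_type())
    assert response.name == 'op'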
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_node_pool_autoscaling(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_node_pool_autoscaling_from_dict():
-    test_set_node_pool_autoscaling(request_type=dict)
-
-
-def test_set_node_pool_autoscaling_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_autoscaling),
-            '__call__') as call:
-        client.set_node_pool_autoscaling()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolAutoscalingRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_autoscaling_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolAutoscalingRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_autoscaling),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_node_pool_autoscaling(request)
-
-        # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_node_pool_autoscaling_async_from_dict(): - await test_set_node_pool_autoscaling_async(request_type=dict) - - -def test_set_node_pool_autoscaling_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolAutoscalingRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_node_pool_autoscaling(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_node_pool_autoscaling_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolAutoscalingRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_node_pool_autoscaling(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_logging_service(transport: str = 'grpc', request_type=cluster_service.SetLoggingServiceRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.set_logging_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_logging_service(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLoggingServiceRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_logging_service_from_dict():
-    test_set_logging_service(request_type=dict)
-
-
-def test_set_logging_service_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_logging_service),
-            '__call__') as call:
-        client.set_logging_service()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLoggingServiceRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_logging_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLoggingServiceRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_logging_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_logging_service(request)
-
-        # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetLoggingServiceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_logging_service_async_from_dict(): - await test_set_logging_service_async(request_type=dict) - - -def test_set_logging_service_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetLoggingServiceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_logging_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_logging_service_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetLoggingServiceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_logging_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_logging_service_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
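One more observation before the flattened call below: it sets both the legacy addressing triple (project_id / zone / cluster_id) and the newer resource name, purely because the test checks pass-through of every flattened field. Real callers should pick one style. For instance, assuming the module's cluster_service import (the resource name and service value are illustrative):

from google.container_v1.types import cluster_service

request = cluster_service.SetLoggingServiceRequest(
    name='projects/p/locations/us-central1-a/clusters/c',
    logging_service='logging.googleapis.com/kubernetes',
)
assert request.project_id == ''  # legacy triple left unset when using `name`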
-
-
-def test_set_logging_service_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_logging_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_logging_service(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            logging_service='logging_service_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].logging_service == 'logging_service_value'
-        assert args[0].name == 'name_value'
-
-
-def test_set_logging_service_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_logging_service(
-            cluster_service.SetLoggingServiceRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            logging_service='logging_service_value',
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_logging_service_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_logging_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_logging_service(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            logging_service='logging_service_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].logging_service == 'logging_service_value'
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_set_logging_service_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_logging_service(
-            cluster_service.SetLoggingServiceRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            logging_service='logging_service_value',
-            name='name_value',
-        )
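The four flattened tests above capture the calling convention shared by every method in this suite: pass either a request message or the individual fields as keyword arguments, never both. A short usage sketch; the resource name and service value are placeholders, and the calls are left commented out because they would hit the live API:

# Request-object form and flattened form are mutually exclusive.
client = ClusterManagerClient(credentials=ga_credentials.AnonymousCredentials())

# 1) Pass a fully-formed request message...
request = cluster_service.SetLoggingServiceRequest(
    name='projects/p/locations/us-central1/clusters/c',
    logging_service='logging.googleapis.com/kubernetes',
)
# client.set_logging_service(request=request)

# 2) ...or pass the fields individually and let the client build the message.
# client.set_logging_service(
#     name='projects/p/locations/us-central1/clusters/c',
#     logging_service='logging.googleapis.com/kubernetes',
# )

# 3) Mixing both raises ValueError, as test_set_logging_service_flattened_error shows.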
-
-
-def test_set_monitoring_service(transport: str = 'grpc', request_type=cluster_service.SetMonitoringServiceRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_monitoring_service(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMonitoringServiceRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_monitoring_service_from_dict():
-    test_set_monitoring_service(request_type=dict)
-
-
-def test_set_monitoring_service_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        client.set_monitoring_service()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMonitoringServiceRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_monitoring_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMonitoringServiceRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_monitoring_service(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMonitoringServiceRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_monitoring_service_async_from_dict():
-    await test_set_monitoring_service_async(request_type=dict)
-
-
-def test_set_monitoring_service_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMonitoringServiceRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_monitoring_service(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_monitoring_service_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMonitoringServiceRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_monitoring_service(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_monitoring_service_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_monitoring_service(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            monitoring_service='monitoring_service_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].monitoring_service == 'monitoring_service_value'
-        assert args[0].name == 'name_value'
-
-
-def test_set_monitoring_service_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_monitoring_service(
-            cluster_service.SetMonitoringServiceRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            monitoring_service='monitoring_service_value',
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_monitoring_service_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_monitoring_service),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_monitoring_service(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            monitoring_service='monitoring_service_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].monitoring_service == 'monitoring_service_value'
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_set_monitoring_service_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_monitoring_service(
-            cluster_service.SetMonitoringServiceRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            monitoring_service='monitoring_service_value',
-            name='name_value',
-        )
-
-
-def test_set_addons_config(transport: str = 'grpc', request_type=cluster_service.SetAddonsConfigRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_addons_config(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetAddonsConfigRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_addons_config_from_dict():
-    test_set_addons_config(request_type=dict)
-
-
-def test_set_addons_config_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        client.set_addons_config()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetAddonsConfigRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetAddonsConfigRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_addons_config(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetAddonsConfigRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_async_from_dict():
-    await test_set_addons_config_async(request_type=dict)
-
-
-def test_set_addons_config_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetAddonsConfigRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_addons_config(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetAddonsConfigRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_addons_config(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_addons_config_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_addons_config(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True))
-        assert args[0].name == 'name_value'
-
-
-def test_set_addons_config_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_addons_config(
-            cluster_service.SetAddonsConfigRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_addons_config(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True))
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_addons_config(
-            cluster_service.SetAddonsConfigRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-            name='name_value',
-        )
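Unlike the string-valued flattened fields elsewhere in this file, `set_addons_config` flattens a message-typed field, and the tests compare the whole `AddonsConfig` message for equality. A sketch of two equivalent ways to build that nested message under proto-plus; the field choice mirrors the tests, and the stepwise mutation is illustrative:

# Nested proto messages can be built inline or field by field; both forms
# produce equal messages for the flattened `addons_config` argument.
addons_inline = cluster_service.AddonsConfig(
    http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True),
)

addons_stepwise = cluster_service.AddonsConfig()
addons_stepwise.http_load_balancing.disabled = True

assert addons_inline == addons_stepwise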
-
-
-def test_set_locations(transport: str = 'grpc', request_type=cluster_service.SetLocationsRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLocationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_locations_from_dict():
-    test_set_locations(request_type=dict)
-
-
-def test_set_locations_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        client.set_locations()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLocationsRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_locations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLocationsRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLocationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_locations_async_from_dict():
-    await test_set_locations_async(request_type=dict)
-
-
-def test_set_locations_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLocationsRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_locations_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLocationsRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_locations_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_locations(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].locations == ['locations_value']
-        assert args[0].name == 'name_value'
-
-
-def test_set_locations_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_locations(
-            cluster_service.SetLocationsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_locations_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_locations(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].locations == ['locations_value']
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_set_locations_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_locations(
-            cluster_service.SetLocationsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-            name='name_value',
-        )
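`set_locations` is the one method in this group whose flattened field is repeated: the tests pass `locations` as a plain Python list and compare it as one. A brief sketch of the list semantics proto-plus exposes for repeated fields; the zone names are placeholders:

request = cluster_service.SetLocationsRequest(
    name='projects/p/locations/us-central1/clusters/c',
    locations=['us-central1-a', 'us-central1-b'],  # repeated string field
)
# Repeated fields behave like Python lists: iteration, append, len all work.
assert list(request.locations) == ['us-central1-a', 'us-central1-b']
request.locations.append('us-central1-c')
assert len(request.locations) == 3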
-
-
-def test_update_master(transport: str = 'grpc', request_type=cluster_service.UpdateMasterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateMasterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_update_master_from_dict():
-    test_update_master(request_type=dict)
-
-
-def test_update_master_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        client.update_master()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateMasterRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_master_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateMasterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateMasterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_update_master_async_from_dict():
-    await test_update_master_async(request_type=dict)
-
-
-def test_update_master_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.UpdateMasterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_master_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.UpdateMasterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_update_master_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_master(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].master_version == 'master_version_value'
-        assert args[0].name == 'name_value'
-
-
-def test_update_master_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_master(
-            cluster_service.UpdateMasterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-            name='name_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_master_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_master(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-            name='name_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].master_version == 'master_version_value'
-        assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_update_master_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_master(
-            cluster_service.UpdateMasterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-            name='name_value',
-        )
-
-
-def test_set_master_auth(transport: str = 'grpc', request_type=cluster_service.SetMasterAuthRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMasterAuthRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_master_auth_from_dict():
-    test_set_master_auth(request_type=dict)
-
-
-def test_set_master_auth_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        client.set_master_auth()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMasterAuthRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_master_auth_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMasterAuthRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMasterAuthRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_master_auth_async_from_dict():
-    await test_set_master_auth_async(request_type=dict)
-
-
-def test_set_master_auth_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMasterAuthRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_master_auth_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMasterAuthRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
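Note that `set_master_auth` gets no flattened tests: its request combines an action enum with a nested credential message, so callers are expected to build the request object themselves. A hedged sketch of such a request; the `action` and `update` fields below follow the published v1 proto, but treat the exact shape as illustrative rather than authoritative:

request = cluster_service.SetMasterAuthRequest(
    name='projects/p/locations/us-central1/clusters/c',  # placeholder name
    action=cluster_service.SetMasterAuthRequest.Action.GENERATE_PASSWORD,
    # `update` carries the new credentials; it can stay empty when the
    # server generates the password itself.
    update=cluster_service.MasterAuth(),
)
# client.set_master_auth(request=request)  # commented out: would hit the API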
-
-
-def test_delete_cluster(transport: str = 'grpc', request_type=cluster_service.DeleteClusterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.DeleteClusterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_delete_cluster_from_dict():
-    test_delete_cluster(request_type=dict)
-
-
-def test_delete_cluster_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        client.delete_cluster()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.DeleteClusterRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteClusterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.DeleteClusterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_async_from_dict():
-    await test_delete_cluster_async(request_type=dict)
-
-
-def test_delete_cluster_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.DeleteClusterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.DeleteClusterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_cluster_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_cluster),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_cluster(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].name == 'name_value'
-
-
-def test_delete_cluster_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_cluster(
- cluster_service.DeleteClusterRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_cluster),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_cluster(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.delete_cluster(
- cluster_service.DeleteClusterRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- name='name_value',
- )
-
-
-def test_list_operations(transport: str = 'grpc', request_type=cluster_service.ListOperationsRequest):
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.ListOperationsResponse(
- missing_zones=['missing_zones_value'],
- )
- response = client.list_operations(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.ListOperationsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ListOperationsResponse)
- assert response.missing_zones == ['missing_zones_value']
-
-
-def test_list_operations_from_dict():
- test_list_operations(request_type=dict)
-
-
-def test_list_operations_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- client.list_operations()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.ListOperationsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_operations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListOperationsRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse(
- missing_zones=['missing_zones_value'],
- ))
- response = await client.list_operations(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.ListOperationsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ListOperationsResponse)
- assert response.missing_zones == ['missing_zones_value']
-
-
-@pytest.mark.asyncio
-async def test_list_operations_async_from_dict():
- await test_list_operations_async(request_type=dict)
-
-
-def test_list_operations_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.ListOperationsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- call.return_value = cluster_service.ListOperationsResponse()
- client.list_operations(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_operations_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.ListOperationsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
- await client.list_operations(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_operations_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.ListOperationsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_operations(
- project_id='project_id_value',
- zone='zone_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
-
-
-def test_list_operations_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_operations(
- cluster_service.ListOperationsRequest(),
- project_id='project_id_value',
- zone='zone_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_operations_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_operations),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_operations(
- project_id='project_id_value',
- zone='zone_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - - -@pytest.mark.asyncio -async def test_list_operations_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_operations( - cluster_service.ListOperationsRequest(), - project_id='project_id_value', - zone='zone_value', - ) - - -def test_get_operation(transport: str = 'grpc', request_type=cluster_service.GetOperationRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.get_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetOperationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_get_operation_from_dict(): - test_get_operation(request_type=dict) - - -def test_get_operation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.get_operation),
- '__call__') as call:
- client.get_operation()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetOperationRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetOperationRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_operation),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.get_operation(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetOperationRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_get_operation_async_from_dict():
- await test_get_operation_async(request_type=dict)
-
-
-def test_get_operation_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.GetOperationRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_operation),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.get_operation(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_operation_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.GetOperationRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_operation),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- await client.get_operation(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_operation_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_operation),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_operation(
- project_id='project_id_value',
- zone='zone_value',
- operation_id='operation_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].operation_id == 'operation_id_value'
- assert args[0].name == 'name_value'
-
-
-def test_get_operation_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_operation(
- cluster_service.GetOperationRequest(),
- project_id='project_id_value',
- zone='zone_value',
- operation_id='operation_id_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_operation_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_operation),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_operation(
- project_id='project_id_value',
- zone='zone_value',
- operation_id='operation_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].operation_id == 'operation_id_value' - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_get_operation_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_operation( - cluster_service.GetOperationRequest(), - project_id='project_id_value', - zone='zone_value', - operation_id='operation_id_value', - name='name_value', - ) - - -def test_cancel_operation(transport: str = 'grpc', request_type=cluster_service.CancelOperationRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CancelOperationRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_cancel_operation_from_dict(): - test_cancel_operation(request_type=dict) - - -def test_cancel_operation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - client.cancel_operation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CancelOperationRequest() - - -@pytest.mark.asyncio -async def test_cancel_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CancelOperationRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CancelOperationRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_cancel_operation_async_from_dict(): - await test_cancel_operation_async(request_type=dict) - - -def test_cancel_operation_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CancelOperationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - call.return_value = None - client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_operation_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CancelOperationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_operation_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_operation( - project_id='project_id_value', - zone='zone_value', - operation_id='operation_id_value', - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].operation_id == 'operation_id_value' - assert args[0].name == 'name_value' - - -def test_cancel_operation_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError):
- client.cancel_operation(
- cluster_service.CancelOperationRequest(),
- project_id='project_id_value',
- zone='zone_value',
- operation_id='operation_id_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_cancel_operation_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.cancel_operation),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.cancel_operation(
- project_id='project_id_value',
- zone='zone_value',
- operation_id='operation_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].operation_id == 'operation_id_value'
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_cancel_operation_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.cancel_operation(
- cluster_service.CancelOperationRequest(),
- project_id='project_id_value',
- zone='zone_value',
- operation_id='operation_id_value',
- name='name_value',
- )
-
-
-def test_get_server_config(transport: str = 'grpc', request_type=cluster_service.GetServerConfigRequest):
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.ServerConfig(
- default_cluster_version='default_cluster_version_value',
- valid_node_versions=['valid_node_versions_value'],
- default_image_type='default_image_type_value',
- valid_image_types=['valid_image_types_value'],
- valid_master_versions=['valid_master_versions_value'],
- )
- response = client.get_server_config(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetServerConfigRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ServerConfig)
- assert response.default_cluster_version == 'default_cluster_version_value'
- assert response.valid_node_versions == ['valid_node_versions_value']
- assert response.default_image_type == 'default_image_type_value'
- assert response.valid_image_types == ['valid_image_types_value']
- assert response.valid_master_versions == ['valid_master_versions_value']
-
-
-def test_get_server_config_from_dict():
- test_get_server_config(request_type=dict)
-
-
-def test_get_server_config_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- client.get_server_config()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetServerConfigRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_server_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetServerConfigRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig(
- default_cluster_version='default_cluster_version_value',
- valid_node_versions=['valid_node_versions_value'],
- default_image_type='default_image_type_value',
- valid_image_types=['valid_image_types_value'],
- valid_master_versions=['valid_master_versions_value'],
- ))
- response = await client.get_server_config(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetServerConfigRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ServerConfig)
- assert response.default_cluster_version == 'default_cluster_version_value'
- assert response.valid_node_versions == ['valid_node_versions_value']
- assert response.default_image_type == 'default_image_type_value'
- assert response.valid_image_types == ['valid_image_types_value']
- assert response.valid_master_versions == ['valid_master_versions_value']
-
-
-@pytest.mark.asyncio
-async def test_get_server_config_async_from_dict():
- await test_get_server_config_async(request_type=dict)
-
-
-def test_get_server_config_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.GetServerConfigRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- call.return_value = cluster_service.ServerConfig()
- client.get_server_config(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_server_config_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.GetServerConfigRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig())
- await client.get_server_config(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_server_config_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.ServerConfig()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_server_config(
- project_id='project_id_value',
- zone='zone_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].name == 'name_value'
-
-
-def test_get_server_config_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_server_config(
- cluster_service.GetServerConfigRequest(),
- project_id='project_id_value',
- zone='zone_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_server_config_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_server_config),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_server_config(
- project_id='project_id_value',
- zone='zone_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_server_config_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_server_config(
- cluster_service.GetServerConfigRequest(),
- project_id='project_id_value',
- zone='zone_value',
- name='name_value',
- )
-
-
-def test_get_json_web_keys(transport: str = 'grpc', request_type=cluster_service.GetJSONWebKeysRequest):
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_json_web_keys),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.GetJSONWebKeysResponse(
- )
- response = client.get_json_web_keys(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetJSONWebKeysRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.GetJSONWebKeysResponse)
-
-
-def test_get_json_web_keys_from_dict():
- test_get_json_web_keys(request_type=dict)
-
-
-def test_get_json_web_keys_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_json_web_keys),
- '__call__') as call:
- client.get_json_web_keys()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetJSONWebKeysRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_json_web_keys_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetJSONWebKeysRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_json_web_keys),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse(
- ))
- response = await client.get_json_web_keys(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetJSONWebKeysRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.GetJSONWebKeysResponse) - - -@pytest.mark.asyncio -async def test_get_json_web_keys_async_from_dict(): - await test_get_json_web_keys_async(request_type=dict) - - -def test_get_json_web_keys_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetJSONWebKeysRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - call.return_value = cluster_service.GetJSONWebKeysResponse() - client.get_json_web_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_json_web_keys_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetJSONWebKeysRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse()) - await client.get_json_web_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_node_pools(transport: str = 'grpc', request_type=cluster_service.ListNodePoolsRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListNodePoolsResponse( - ) - response = client.list_node_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListNodePoolsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, cluster_service.ListNodePoolsResponse)
-
-
-def test_list_node_pools_from_dict():
- test_list_node_pools(request_type=dict)
-
-
-def test_list_node_pools_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_node_pools),
- '__call__') as call:
- client.list_node_pools()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.ListNodePoolsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_node_pools_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListNodePoolsRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_node_pools),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse(
- ))
- response = await client.list_node_pools(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.ListNodePoolsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.ListNodePoolsResponse)
-
-
-@pytest.mark.asyncio
-async def test_list_node_pools_async_from_dict():
- await test_list_node_pools_async(request_type=dict)
-
-
-def test_list_node_pools_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.ListNodePoolsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_node_pools),
- '__call__') as call:
- call.return_value = cluster_service.ListNodePoolsResponse()
- client.list_node_pools(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_node_pools_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.ListNodePoolsRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_node_pools),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
- await client.list_node_pools(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_list_node_pools_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_node_pools),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.ListNodePoolsResponse()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.list_node_pools(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].parent == 'parent_value'
-
-
-def test_list_node_pools_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.list_node_pools(
- cluster_service.ListNodePoolsRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_list_node_pools_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.list_node_pools),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.list_node_pools(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_node_pools_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.list_node_pools( - cluster_service.ListNodePoolsRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - parent='parent_value', - ) - - -def test_get_node_pool(transport: str = 'grpc', request_type=cluster_service.GetNodePoolRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.NodePool( - name='name_value', - initial_node_count=1911, - locations=['locations_value'], - self_link='self_link_value', - version='version_value', - instance_group_urls=['instance_group_urls_value'], - status=cluster_service.NodePool.Status.PROVISIONING, - status_message='status_message_value', - pod_ipv4_cidr_size=1856, - ) - response = client.get_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.NodePool) - assert response.name == 'name_value' - assert response.initial_node_count == 1911 - assert response.locations == ['locations_value'] - assert response.self_link == 'self_link_value' - assert response.version == 'version_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.status == cluster_service.NodePool.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.pod_ipv4_cidr_size == 1856 - - -def test_get_node_pool_from_dict(): - test_get_node_pool(request_type=dict) - - -def test_get_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - client.get_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetNodePoolRequest() - - -@pytest.mark.asyncio -async def test_get_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool(
- name='name_value',
- initial_node_count=1911,
- locations=['locations_value'],
- self_link='self_link_value',
- version='version_value',
- instance_group_urls=['instance_group_urls_value'],
- status=cluster_service.NodePool.Status.PROVISIONING,
- status_message='status_message_value',
- pod_ipv4_cidr_size=1856,
- ))
- response = await client.get_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.GetNodePoolRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.NodePool)
- assert response.name == 'name_value'
- assert response.initial_node_count == 1911
- assert response.locations == ['locations_value']
- assert response.self_link == 'self_link_value'
- assert response.version == 'version_value'
- assert response.instance_group_urls == ['instance_group_urls_value']
- assert response.status == cluster_service.NodePool.Status.PROVISIONING
- assert response.status_message == 'status_message_value'
- assert response.pod_ipv4_cidr_size == 1856
-
-
-@pytest.mark.asyncio
-async def test_get_node_pool_async_from_dict():
- await test_get_node_pool_async(request_type=dict)
-
-
-def test_get_node_pool_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.GetNodePoolRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_node_pool),
- '__call__') as call:
- call.return_value = cluster_service.NodePool()
- client.get_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_node_pool_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.GetNodePoolRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_node_pool),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool())
- await client.get_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_get_node_pool_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.NodePool()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.get_node_pool(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool_id == 'node_pool_id_value'
- assert args[0].name == 'name_value'
-
-
-def test_get_node_pool_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.get_node_pool(
- cluster_service.GetNodePoolRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_get_node_pool_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.get_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.get_node_pool(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool_id == 'node_pool_id_value'
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_get_node_pool_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- await client.get_node_pool(
- cluster_service.GetNodePoolRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
-
-def test_create_node_pool(transport: str = 'grpc', request_type=cluster_service.CreateNodePoolRequest):
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
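- # (Illustrative aside: under proto3 every field carries a default, so an
- # empty request is still valid on the wire. For example, assuming the
- # usual proto-plus behavior:
- #
- #   req = cluster_service.CreateNodePoolRequest()
- #   assert req.project_id == ''  # unset string fields read back as ''
- # )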
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.create_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_create_node_pool_from_dict(): - test_create_node_pool(request_type=dict) - - -def test_create_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - client.create_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateNodePoolRequest() - - -@pytest.mark.asyncio -async def test_create_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.create_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.CreateNodePoolRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_create_node_pool_async_from_dict():
- await test_create_node_pool_async(request_type=dict)
-
-
-def test_create_node_pool_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.CreateNodePoolRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_node_pool),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.create_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_create_node_pool_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.CreateNodePoolRequest()
-
- request.parent = 'parent/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_node_pool),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- await client.create_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
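- # (Illustrative aside: gRPC metadata is a sequence of (key, value) tuples,
- # so the kw['metadata'] inspected below is expected to contain something
- # like ('x-goog-request-params', 'parent=parent/value'), mirroring the
- # request fields that would have appeared in the HTTP/1.1 URI.)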
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'parent=parent/value',
- ) in kw['metadata']
-
-
-def test_create_node_pool_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.create_node_pool(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool=cluster_service.NodePool(name='name_value'),
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool == cluster_service.NodePool(name='name_value')
- assert args[0].parent == 'parent_value'
-
-
-def test_create_node_pool_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.create_node_pool(
- cluster_service.CreateNodePoolRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool=cluster_service.NodePool(name='name_value'),
- parent='parent_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_create_node_pool_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.create_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.create_node_pool(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool=cluster_service.NodePool(name='name_value'),
- parent='parent_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool == cluster_service.NodePool(name='name_value')
- assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_create_node_pool_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
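- # (Illustrative aside: the flattened keywords are used to build the
- # request, so passing a request object at the same time, e.g.
- #
- #   client.create_node_pool(cluster_service.CreateNodePoolRequest(), parent='p')
- #
- # is ambiguous, and the client raises ValueError rather than guess.)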
- with pytest.raises(ValueError): - await client.create_node_pool( - cluster_service.CreateNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool=cluster_service.NodePool(name='name_value'), - parent='parent_value', - ) - - -def test_delete_node_pool(transport: str = 'grpc', request_type=cluster_service.DeleteNodePoolRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.delete_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.DeleteNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_delete_node_pool_from_dict(): - test_delete_node_pool(request_type=dict) - - -def test_delete_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - client.delete_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.DeleteNodePoolRequest() - - -@pytest.mark.asyncio -async def test_delete_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
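- # (Illustrative aside: client.transport.delete_node_pool resolves to the
- # unary-unary gRPC callable for this method, so patching '__call__' on its
- # type intercepts the stub invocation while leaving the client's request
- # and response plumbing in place.)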
- with mock.patch.object(
- type(client.transport.delete_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.delete_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.DeleteNodePoolRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_async_from_dict():
- await test_delete_node_pool_async(request_type=dict)
-
-
-def test_delete_node_pool_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.DeleteNodePoolRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_node_pool),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.delete_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.DeleteNodePoolRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_node_pool),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- await client.delete_node_pool(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_delete_node_pool_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.delete_node_pool(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool_id == 'node_pool_id_value'
- assert args[0].name == 'name_value'
-
-
-def test_delete_node_pool_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.delete_node_pool(
- cluster_service.DeleteNodePoolRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.delete_node_pool),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.delete_node_pool(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool_id == 'node_pool_id_value'
- assert args[0].name == 'name_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_flattened_error_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError): - await client.delete_node_pool( - cluster_service.DeleteNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - name='name_value', - ) - - -def test_rollback_node_pool_upgrade(transport: str = 'grpc', request_type=cluster_service.RollbackNodePoolUpgradeRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.rollback_node_pool_upgrade), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.rollback_node_pool_upgrade(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_rollback_node_pool_upgrade_from_dict(): - test_rollback_node_pool_upgrade(request_type=dict) - - -def test_rollback_node_pool_upgrade_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.rollback_node_pool_upgrade), - '__call__') as call: - client.rollback_node_pool_upgrade() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest() - - -@pytest.mark.asyncio -async def test_rollback_node_pool_upgrade_async(transport: str = 'grpc_asyncio', request_type=cluster_service.RollbackNodePoolUpgradeRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
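- # (Illustrative aside: request_type is parametrized, so the same test body
- # runs with the proto class here and, via the *_from_dict variants, with a
- # plain dict; in both cases the client coerces the value into the request
- # message.)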
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.rollback_node_pool_upgrade),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.rollback_node_pool_upgrade(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_async_from_dict():
- await test_rollback_node_pool_upgrade_async(request_type=dict)
-
-
-def test_rollback_node_pool_upgrade_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.RollbackNodePoolUpgradeRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.rollback_node_pool_upgrade),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.rollback_node_pool_upgrade(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.RollbackNodePoolUpgradeRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.rollback_node_pool_upgrade),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- await client.rollback_node_pool_upgrade(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_rollback_node_pool_upgrade_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.rollback_node_pool_upgrade),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.rollback_node_pool_upgrade(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].node_pool_id == 'node_pool_id_value'
- assert args[0].name == 'name_value'
-
-
-def test_rollback_node_pool_upgrade_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.rollback_node_pool_upgrade(
- cluster_service.RollbackNodePoolUpgradeRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.rollback_node_pool_upgrade),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.rollback_node_pool_upgrade(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- node_pool_id='node_pool_id_value',
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
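- # (Illustrative aside: each entry of call.mock_calls is a (name, args,
- # kwargs) triple, so unpacking with `_, args, _` isolates the positional
- # arguments; args[0] is the request proto the flattened keywords were
- # folded into.)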
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].node_pool_id == 'node_pool_id_value' - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_rollback_node_pool_upgrade_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.rollback_node_pool_upgrade( - cluster_service.RollbackNodePoolUpgradeRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - name='name_value', - ) - - -def test_set_node_pool_management(transport: str = 'grpc', request_type=cluster_service.SetNodePoolManagementRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_management), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_node_pool_management(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolManagementRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_node_pool_management_from_dict(): - test_set_node_pool_management(request_type=dict) - - -def test_set_node_pool_management_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
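- # (Illustrative aside: with no request and no flattened fields, the client
- # falls back to an all-defaults request, i.e. the bare
- # client.set_node_pool_management() below behaves like passing
- # cluster_service.SetNodePoolManagementRequest().)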
- with mock.patch.object(
- type(client.transport.set_node_pool_management),
- '__call__') as call:
- client.set_node_pool_management()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetNodePoolManagementRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolManagementRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_node_pool_management),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.set_node_pool_management(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetNodePoolManagementRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_async_from_dict():
- await test_set_node_pool_management_async(request_type=dict)
-
-
-def test_set_node_pool_management_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.SetNodePoolManagementRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_node_pool_management),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.set_node_pool_management(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_node_pool_management_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolManagementRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_management), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_node_pool_management(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_labels(transport: str = 'grpc', request_type=cluster_service.SetLabelsRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_labels), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_labels(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetLabelsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_labels_from_dict(): - test_set_labels(request_type=dict) - - -def test_set_labels_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object(
- type(client.transport.set_labels),
- '__call__') as call:
- client.set_labels()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetLabelsRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_labels_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLabelsRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_labels),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.set_labels(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetLabelsRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_labels_async_from_dict():
- await test_set_labels_async(request_type=dict)
-
-
-def test_set_labels_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.SetLabelsRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_labels),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.set_labels(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_labels_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.SetLabelsRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_labels),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- await client.set_labels(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_set_legacy_abac(transport: str = 'grpc', request_type=cluster_service.SetLegacyAbacRequest):
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- )
- response = client.set_legacy_abac(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetLegacyAbacRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-def test_set_legacy_abac_from_dict():
- test_set_legacy_abac(request_type=dict)
-
-
-def test_set_legacy_abac_empty_call():
- # This test is a coverage failsafe to make sure that totally empty calls,
- # i.e. request == None and no flattened fields passed, work.
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport='grpc',
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- client.set_legacy_abac()
- call.assert_called()
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetLegacyAbacRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLegacyAbacRequest):
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- transport=transport,
- )
-
- # Everything is optional in proto3 as far as the runtime is concerned,
- # and we are mocking out the actual API, so just send an empty request.
- request = request_type()
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
- name='name_value',
- zone='zone_value',
- operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
- status=cluster_service.Operation.Status.PENDING,
- detail='detail_value',
- status_message='status_message_value',
- self_link='self_link_value',
- target_link='target_link_value',
- location='location_value',
- start_time='start_time_value',
- end_time='end_time_value',
- ))
- response = await client.set_legacy_abac(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == cluster_service.SetLegacyAbacRequest()
-
- # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation)
- assert response.name == 'name_value'
- assert response.zone == 'zone_value'
- assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
- assert response.status == cluster_service.Operation.Status.PENDING
- assert response.detail == 'detail_value'
- assert response.status_message == 'status_message_value'
- assert response.self_link == 'self_link_value'
- assert response.target_link == 'target_link_value'
- assert response.location == 'location_value'
- assert response.start_time == 'start_time_value'
- assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_async_from_dict():
- await test_set_legacy_abac_async(request_type=dict)
-
-
-def test_set_legacy_abac_field_headers():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.SetLegacyAbacRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- call.return_value = cluster_service.Operation()
- client.set_legacy_abac(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_field_headers_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Any value that is part of the HTTP/1.1 URI should be sent as
- # a field header. Set these to a non-empty value.
- request = cluster_service.SetLegacyAbacRequest()
-
- request.name = 'name/value'
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- await client.set_legacy_abac(request)
-
- # Establish that the underlying gRPC stub method was called.
- assert len(call.mock_calls)
- _, args, _ = call.mock_calls[0]
- assert args[0] == request
-
- # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0]
- assert (
- 'x-goog-request-params',
- 'name=name/value',
- ) in kw['metadata']
-
-
-def test_set_legacy_abac_flattened():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = cluster_service.Operation()
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- client.set_legacy_abac(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- enabled=True,
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) == 1
- _, args, _ = call.mock_calls[0]
- assert args[0].project_id == 'project_id_value'
- assert args[0].zone == 'zone_value'
- assert args[0].cluster_id == 'cluster_id_value'
- assert args[0].enabled == True
- assert args[0].name == 'name_value'
-
-
-def test_set_legacy_abac_flattened_error():
- client = ClusterManagerClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Attempting to call a method with both a request object and flattened
- # fields is an error.
- with pytest.raises(ValueError):
- client.set_legacy_abac(
- cluster_service.SetLegacyAbacRequest(),
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- enabled=True,
- name='name_value',
- )
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_flattened_async():
- client = ClusterManagerAsyncClient(
- credentials=ga_credentials.AnonymousCredentials(),
- )
-
- # Mock the actual call within the gRPC stub, and fake the request.
- with mock.patch.object(
- type(client.transport.set_legacy_abac),
- '__call__') as call:
- # Designate an appropriate return value for the call.
- call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
- # Call the method with a truthy value for each flattened field,
- # using the keyword arguments to the method.
- response = await client.set_legacy_abac(
- project_id='project_id_value',
- zone='zone_value',
- cluster_id='cluster_id_value',
- enabled=True,
- name='name_value',
- )
-
- # Establish that the underlying call was made with the expected
- # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].enabled == True - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_set_legacy_abac_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_legacy_abac( - cluster_service.SetLegacyAbacRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - enabled=True, - name='name_value', - ) - - -def test_start_ip_rotation(transport: str = 'grpc', request_type=cluster_service.StartIPRotationRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_ip_rotation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.start_ip_rotation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.StartIPRotationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_start_ip_rotation_from_dict(): - test_start_ip_rotation(request_type=dict) - - -def test_start_ip_rotation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        client.start_ip_rotation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.StartIPRotationRequest()
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.StartIPRotationRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.start_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.StartIPRotationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_async_from_dict():
-    await test_start_ip_rotation_async(request_type=dict)
-
-
-def test_start_ip_rotation_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.StartIPRotationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.start_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_start_ip_rotation_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.StartIPRotationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_ip_rotation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.start_ip_rotation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_start_ip_rotation_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_ip_rotation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.start_ip_rotation( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].name == 'name_value' - - -def test_start_ip_rotation_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.start_ip_rotation( - cluster_service.StartIPRotationRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_start_ip_rotation_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.start_ip_rotation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.start_ip_rotation( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_start_ip_rotation_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.start_ip_rotation( - cluster_service.StartIPRotationRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - -def test_complete_ip_rotation(transport: str = 'grpc', request_type=cluster_service.CompleteIPRotationRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_ip_rotation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.complete_ip_rotation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CompleteIPRotationRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_complete_ip_rotation_from_dict(): - test_complete_ip_rotation(request_type=dict) - - -def test_complete_ip_rotation_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        client.complete_ip_rotation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CompleteIPRotationRequest()
-
-
-@pytest.mark.asyncio
-async def test_complete_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CompleteIPRotationRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.complete_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CompleteIPRotationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_complete_ip_rotation_async_from_dict():
-    await test_complete_ip_rotation_async(request_type=dict)
-
-
-def test_complete_ip_rotation_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.CompleteIPRotationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.complete_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_complete_ip_rotation_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CompleteIPRotationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_ip_rotation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.complete_ip_rotation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_complete_ip_rotation_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_ip_rotation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.complete_ip_rotation( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].name == 'name_value' - - -def test_complete_ip_rotation_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.complete_ip_rotation( - cluster_service.CompleteIPRotationRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_complete_ip_rotation_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.complete_ip_rotation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.complete_ip_rotation( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_complete_ip_rotation_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.complete_ip_rotation( - cluster_service.CompleteIPRotationRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - name='name_value', - ) - - -def test_set_node_pool_size(transport: str = 'grpc', request_type=cluster_service.SetNodePoolSizeRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_size), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_node_pool_size(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolSizeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_node_pool_size_from_dict(): - test_set_node_pool_size(request_type=dict) - - -def test_set_node_pool_size_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.set_node_pool_size),
-            '__call__') as call:
-        client.set_node_pool_size()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolSizeRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_size_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolSizeRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_size),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_node_pool_size(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolSizeRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_size_async_from_dict():
-    await test_set_node_pool_size_async(request_type=dict)
-
-
-def test_set_node_pool_size_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNodePoolSizeRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_size),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_node_pool_size(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_node_pool_size_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolSizeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_size), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_node_pool_size(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_network_policy(transport: str = 'grpc', request_type=cluster_service.SetNetworkPolicyRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_network_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_network_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNetworkPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_network_policy_from_dict(): - test_set_network_policy(request_type=dict) - - -def test_set_network_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        client.set_network_policy()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNetworkPolicyRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_network_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNetworkPolicyRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_network_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNetworkPolicyRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_network_policy_async_from_dict():
-    await test_set_network_policy_async(request_type=dict)
-
-
-def test_set_network_policy_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNetworkPolicyRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_network_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_network_policy_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNetworkPolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_network_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_network_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_network_policy_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_network_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_network_policy( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) - assert args[0].name == 'name_value' - - -def test_set_network_policy_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_network_policy( - cluster_service.SetNetworkPolicyRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_set_network_policy_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_network_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.set_network_policy( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_set_network_policy_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_network_policy( - cluster_service.SetNetworkPolicyRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), - name='name_value', - ) - - -def test_set_maintenance_policy(transport: str = 'grpc', request_type=cluster_service.SetMaintenancePolicyRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_maintenance_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetMaintenancePolicyRequest() - - # Establish that the response is the type that we expect. 
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_maintenance_policy_from_dict():
-    test_set_maintenance_policy(request_type=dict)
-
-
-def test_set_maintenance_policy_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_maintenance_policy),
-            '__call__') as call:
-        client.set_maintenance_policy()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMaintenancePolicyRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_maintenance_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMaintenancePolicyRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_maintenance_policy),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_maintenance_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMaintenancePolicyRequest()
-
-    # Establish that the response is the type that we expect.
- assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_maintenance_policy_async_from_dict(): - await test_set_maintenance_policy_async(request_type=dict) - - -def test_set_maintenance_policy_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetMaintenancePolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_maintenance_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_maintenance_policy_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetMaintenancePolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_maintenance_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_maintenance_policy_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.set_maintenance_policy( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))) - assert args[0].name == 'name_value' - - -def test_set_maintenance_policy_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_maintenance_policy( - cluster_service.SetMaintenancePolicyRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), - name='name_value', - ) - - -@pytest.mark.asyncio -async def test_set_maintenance_policy_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_maintenance_policy( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))) - assert args[0].name == 'name_value' - - -@pytest.mark.asyncio -async def test_set_maintenance_policy_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
-    with pytest.raises(ValueError):
-        await client.set_maintenance_policy(
-            cluster_service.SetMaintenancePolicyRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))),
-            name='name_value',
-        )
-
-
-def test_list_usable_subnetworks(transport: str = 'grpc', request_type=cluster_service.ListUsableSubnetworksRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListUsableSubnetworksResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListUsableSubnetworksPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_usable_subnetworks_from_dict():
-    test_list_usable_subnetworks(request_type=dict)
-
-
-def test_list_usable_subnetworks_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        client.list_usable_subnetworks()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListUsableSubnetworksRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async_from_dict():
-    await test_list_usable_subnetworks_async(request_type=dict)
-
-
-def test_list_usable_subnetworks_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListUsableSubnetworksRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        call.return_value = cluster_service.ListUsableSubnetworksResponse()
-        client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListUsableSubnetworksRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse())
-        await client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_usable_subnetworks_pager():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_usable_subnetworks(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, cluster_service.UsableSubnetwork)
-                   for i in results)
-
-def test_list_usable_subnetworks_pages():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_usable_subnetworks(request={}).pages)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async_pager():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_usable_subnetworks(request={})
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, cluster_service.UsableSubnetwork)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async_pages():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_usable_subnetworks(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-
-def test_credentials_transport_error():
-    # It is an error to provide credentials and a transport instance.
-    transport = transports.ClusterManagerGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = ClusterManagerClient(
-            credentials=ga_credentials.AnonymousCredentials(),
-            transport=transport,
-        )
-
-    # It is an error to provide a credentials file and a transport instance.
-    transport = transports.ClusterManagerGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = ClusterManagerClient(
-            client_options={"credentials_file": "credentials.json"},
-            transport=transport,
-        )
-
-    # It is an error to provide scopes and a transport instance.
-    transport = transports.ClusterManagerGrpcTransport(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-    with pytest.raises(ValueError):
-        client = ClusterManagerClient(
-            client_options={"scopes": ["1", "2"]},
-            transport=transport,
-        )
-
-
-def test_transport_instance():
-    # A client may be instantiated with a custom transport instance.
- transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ClusterManagerClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ClusterManagerGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ClusterManagerGrpcTransport, - transports.ClusterManagerGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ClusterManagerGrpcTransport, - ) - -def test_cluster_manager_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ClusterManagerTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_cluster_manager_base_transport(): - # Instantiate the base transport. - with mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ClusterManagerTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'list_clusters', - 'get_cluster', - 'create_cluster', - 'update_cluster', - 'update_node_pool', - 'set_node_pool_autoscaling', - 'set_logging_service', - 'set_monitoring_service', - 'set_addons_config', - 'set_locations', - 'update_master', - 'set_master_auth', - 'delete_cluster', - 'list_operations', - 'get_operation', - 'cancel_operation', - 'get_server_config', - 'get_json_web_keys', - 'list_node_pools', - 'get_node_pool', - 'create_node_pool', - 'delete_node_pool', - 'rollback_node_pool_upgrade', - 'set_node_pool_management', - 'set_labels', - 'set_legacy_abac', - 'start_ip_rotation', - 'complete_ip_rotation', - 'set_node_pool_size', - 'set_network_policy', - 'set_maintenance_policy', - 'list_usable_subnetworks', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - -@requires_google_auth_gte_1_25_0 -def test_cluster_manager_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterManagerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_manager_base_transport_with_credentials_file_old_google_auth(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterManagerTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", scopes=( - 'https://www.googleapis.com/auth/cloud-platform', - ), - quota_project_id="octopus", - ) - - -def test_cluster_manager_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.container_v1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport = transports.ClusterManagerTransport() - adc.assert_called_once() - - -@requires_google_auth_gte_1_25_0 -def test_cluster_manager_auth_adc(): - # If no credentials are provided, we should use ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterManagerClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_manager_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterManagerClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterManagerGrpcTransport, - transports.ClusterManagerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_cluster_manager_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterManagerGrpcTransport, - transports.ClusterManagerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_cluster_manager_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ClusterManagerGrpcTransport, grpc_helpers), - (transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
-    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object(
-        grpc_helpers, "create_channel", autospec=True
-    ) as create_channel:
-        creds = ga_credentials.AnonymousCredentials()
-        adc.return_value = (creds, None)
-        transport_class(
-            quota_project_id="octopus",
-            scopes=["1", "2"]
-        )
-
-        create_channel.assert_called_with(
-            "container.googleapis.com:443",
-            credentials=creds,
-            credentials_file=None,
-            quota_project_id="octopus",
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            scopes=["1", "2"],
-            default_host="container.googleapis.com",
-            ssl_credentials=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-
-@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport])
-def test_cluster_manager_grpc_transport_client_cert_source_for_mtls(
-    transport_class
-):
-    cred = ga_credentials.AnonymousCredentials()
-
-    # Check ssl_channel_credentials is used if provided.
-    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
-        mock_ssl_channel_creds = mock.Mock()
-        transport_class(
-            host="squid.clam.whelk",
-            credentials=cred,
-            ssl_channel_credentials=mock_ssl_channel_creds
-        )
-        mock_create_channel.assert_called_once_with(
-            "squid.clam.whelk:443",
-            credentials=cred,
-            credentials_file=None,
-            scopes=None,
-            ssl_credentials=mock_ssl_channel_creds,
-            quota_project_id=None,
-            options=[
-                ("grpc.max_send_message_length", -1),
-                ("grpc.max_receive_message_length", -1),
-            ],
-        )
-
-    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
-    # is used.
-    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
-        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
-            transport_class(
-                credentials=cred,
-                client_cert_source_for_mtls=client_cert_source_callback
-            )
-            expected_cert, expected_key = client_cert_source_callback()
-            mock_ssl_cred.assert_called_once_with(
-                certificate_chain=expected_cert,
-                private_key=expected_key
-            )
-
-
-def test_cluster_manager_host_no_port():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com'),
-    )
-    assert client.transport._host == 'container.googleapis.com:443'
-
-
-def test_cluster_manager_host_with_port():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com:8000'),
-    )
-    assert client.transport._host == 'container.googleapis.com:8000'
-
-def test_cluster_manager_grpc_transport_channel():
-    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.ClusterManagerGrpcTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-def test_cluster_manager_grpc_asyncio_transport_channel():
-    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
-
-    # Check that channel is used if provided.
-    transport = transports.ClusterManagerGrpcAsyncIOTransport(
-        host="squid.clam.whelk",
-        channel=channel,
-    )
-    assert transport.grpc_channel == channel
-    assert transport._host == "squid.clam.whelk:443"
-    assert transport._ssl_channel_credentials is None
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport])
-def test_cluster_manager_transport_channel_mtls_with_client_cert_source(
-    transport_class
-):
-    with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
-        with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
-            mock_ssl_cred = mock.Mock()
-            grpc_ssl_channel_cred.return_value = mock_ssl_cred
-
-            mock_grpc_channel = mock.Mock()
-            grpc_create_channel.return_value = mock_grpc_channel
-
-            cred = ga_credentials.AnonymousCredentials()
-            with pytest.warns(DeprecationWarning):
-                with mock.patch.object(google.auth, 'default') as adc:
-                    adc.return_value = (cred, None)
-                    transport = transport_class(
-                        host="squid.clam.whelk",
-                        api_mtls_endpoint="mtls.squid.clam.whelk",
-                        client_cert_source=client_cert_source_callback,
-                    )
-                    adc.assert_called_once()
-
-            grpc_ssl_channel_cred.assert_called_once_with(
-                certificate_chain=b"cert bytes", private_key=b"key bytes"
-            )
-            grpc_create_channel.assert_called_once_with(
-                "mtls.squid.clam.whelk:443",
-                credentials=cred,
-                credentials_file=None,
-                scopes=None,
-                ssl_credentials=mock_ssl_cred,
-                quota_project_id=None,
-                options=[
-                    ("grpc.max_send_message_length", -1),
-                    ("grpc.max_receive_message_length", -1),
-                ],
-            )
-            assert transport.grpc_channel == mock_grpc_channel
-            assert transport._ssl_channel_credentials == mock_ssl_cred
-
-
-# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
-# removed from grpc/grpc_asyncio transport constructor.
-@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) -def test_cluster_manager_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_common_billing_account_path(): - billing_account = "squid" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = ClusterManagerClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "clam", - } - path = ClusterManagerClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "whelk" - expected = "folders/{folder}".format(folder=folder, ) - actual = ClusterManagerClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "octopus", - } - path = ClusterManagerClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "oyster" - expected = "organizations/{organization}".format(organization=organization, ) - actual = ClusterManagerClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nudibranch", - } - path = ClusterManagerClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "cuttlefish" - expected = "projects/{project}".format(project=project, ) - actual = ClusterManagerClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "mussel", - } - path = ClusterManagerClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = ClusterManagerClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "winkle" - location = "nautilus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = ClusterManagerClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "scallop", - "location": "abalone", - } - path = ClusterManagerClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: - transport_class = ClusterManagerClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/owl-bot-staging/v1beta1/.coveragerc b/owl-bot-staging/v1beta1/.coveragerc deleted file mode 100644 index f0a87b59..00000000 --- a/owl-bot-staging/v1beta1/.coveragerc +++ /dev/null @@ -1,17 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/container/__init__.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1beta1/MANIFEST.in b/owl-bot-staging/v1beta1/MANIFEST.in deleted file mode 100644 index 36b8dd0a..00000000 --- a/owl-bot-staging/v1beta1/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/container *.py -recursive-include google/container_v1beta1 *.py diff --git a/owl-bot-staging/v1beta1/README.rst b/owl-bot-staging/v1beta1/README.rst deleted file mode 100644 index 83d9858c..00000000 --- a/owl-bot-staging/v1beta1/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Container API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Container API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. 
-
-With `virtualenv`_, it's possible to install this library without needing system
-install permissions, and without clashing with the installed system
-dependencies.
-
-.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
-
-
-Mac/Linux
-^^^^^^^^^
-
-.. code-block:: console
-
-    python3 -m venv <your-env>
-    source <your-env>/bin/activate
-    <your-env>/bin/pip install /path/to/library
-
-
-Windows
-^^^^^^^
-
-.. code-block:: console
-
-    python3 -m venv <your-env>
-    <your-env>\Scripts\activate
-    <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/v1beta1/docs/conf.py b/owl-bot-staging/v1beta1/docs/conf.py
deleted file mode 100644
index 1f19408e..00000000
--- a/owl-bot-staging/v1beta1/docs/conf.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# -*- coding: utf-8 -*-
-# Copyright 2020 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-#
-# google-container documentation build configuration file
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys
-import os
-import shlex
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-sys.path.insert(0, os.path.abspath(".."))
-
-__version__ = "0.1.0"
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = "1.6.3"
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
-    "sphinx.ext.autodoc",
-    "sphinx.ext.autosummary",
-    "sphinx.ext.intersphinx",
-    "sphinx.ext.coverage",
-    "sphinx.ext.napoleon",
-    "sphinx.ext.todo",
-    "sphinx.ext.viewcode",
-]
-
-# autodoc/autosummary flags
-autoclass_content = "both"
-autodoc_default_flags = ["members"]
-autosummary_generate = True
-
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ["_templates"]
-
-# Allow markdown includes (so releases.md can include CHANGELOG.md)
-# http://www.sphinx-doc.org/en/master/markdown.html
-source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffixes as a list of strings:
-source_suffix = [".rst", ".md"]
-
-# The encoding of source files.
-# source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = "index"
-
-# General information about the project.
-project = u"google-container" -copyright = u"2020, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. 
They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-container-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). 
-latex_documents = [ - ( - master_doc, - "google-container.tex", - u"google-container Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - master_doc, - "google-container", - u"Google Container Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - master_doc, - "google-container", - u"google-container Documentation", - author, - "google-container", - "GAPIC library for Google Container API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst b/owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst deleted file mode 100644 index 9a9600fb..00000000 --- a/owl-bot-staging/v1beta1/docs/container_v1beta1/cluster_manager.rst +++ /dev/null @@ -1,10 +0,0 @@ -ClusterManager --------------------------------- - -.. automodule:: google.container_v1beta1.services.cluster_manager - :members: - :inherited-members: - -.. 
automodule:: google.container_v1beta1.services.cluster_manager.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst b/owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst deleted file mode 100644 index 18ed4869..00000000 --- a/owl-bot-staging/v1beta1/docs/container_v1beta1/services.rst +++ /dev/null @@ -1,6 +0,0 @@ -Services for Google Container v1beta1 API -========================================= -.. toctree:: - :maxdepth: 2 - - cluster_manager diff --git a/owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst b/owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst deleted file mode 100644 index 053b05fb..00000000 --- a/owl-bot-staging/v1beta1/docs/container_v1beta1/types.rst +++ /dev/null @@ -1,7 +0,0 @@ -Types for Google Container v1beta1 API -====================================== - -.. automodule:: google.container_v1beta1.types - :members: - :undoc-members: - :show-inheritance: diff --git a/owl-bot-staging/v1beta1/docs/index.rst b/owl-bot-staging/v1beta1/docs/index.rst deleted file mode 100644 index de07690b..00000000 --- a/owl-bot-staging/v1beta1/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. toctree:: - :maxdepth: 2 - - container_v1beta1/services - container_v1beta1/types diff --git a/owl-bot-staging/v1beta1/google/container/__init__.py b/owl-bot-staging/v1beta1/google/container/__init__.py deleted file mode 100644 index 22324c69..00000000 --- a/owl-bot-staging/v1beta1/google/container/__init__.py +++ /dev/null @@ -1,249 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.container_v1beta1.services.cluster_manager.client import ClusterManagerClient -from google.container_v1beta1.services.cluster_manager.async_client import ClusterManagerAsyncClient - -from google.container_v1beta1.types.cluster_service import AcceleratorConfig -from google.container_v1beta1.types.cluster_service import AddonsConfig -from google.container_v1beta1.types.cluster_service import AuthenticatorGroupsConfig -from google.container_v1beta1.types.cluster_service import AutoprovisioningNodePoolDefaults -from google.container_v1beta1.types.cluster_service import AutoUpgradeOptions -from google.container_v1beta1.types.cluster_service import BinaryAuthorization -from google.container_v1beta1.types.cluster_service import CancelOperationRequest -from google.container_v1beta1.types.cluster_service import ClientCertificateConfig -from google.container_v1beta1.types.cluster_service import CloudRunConfig -from google.container_v1beta1.types.cluster_service import Cluster -from google.container_v1beta1.types.cluster_service import ClusterAutoscaling -from google.container_v1beta1.types.cluster_service import ClusterTelemetry -from google.container_v1beta1.types.cluster_service import ClusterUpdate -from google.container_v1beta1.types.cluster_service import CompleteIPRotationRequest -from google.container_v1beta1.types.cluster_service import ConfidentialNodes -from google.container_v1beta1.types.cluster_service import ConfigConnectorConfig -from google.container_v1beta1.types.cluster_service import CreateClusterRequest -from google.container_v1beta1.types.cluster_service import CreateNodePoolRequest -from google.container_v1beta1.types.cluster_service import DailyMaintenanceWindow -from google.container_v1beta1.types.cluster_service import DatabaseEncryption -from google.container_v1beta1.types.cluster_service import DefaultSnatStatus -from google.container_v1beta1.types.cluster_service import DeleteClusterRequest -from google.container_v1beta1.types.cluster_service import DeleteNodePoolRequest -from google.container_v1beta1.types.cluster_service import DnsCacheConfig -from google.container_v1beta1.types.cluster_service import EphemeralStorageConfig -from google.container_v1beta1.types.cluster_service import GcePersistentDiskCsiDriverConfig -from google.container_v1beta1.types.cluster_service import GetClusterRequest -from google.container_v1beta1.types.cluster_service import GetJSONWebKeysRequest -from google.container_v1beta1.types.cluster_service import GetJSONWebKeysResponse -from google.container_v1beta1.types.cluster_service import GetNodePoolRequest -from google.container_v1beta1.types.cluster_service import GetOpenIDConfigRequest -from google.container_v1beta1.types.cluster_service import GetOpenIDConfigResponse -from google.container_v1beta1.types.cluster_service import GetOperationRequest -from google.container_v1beta1.types.cluster_service import GetServerConfigRequest -from google.container_v1beta1.types.cluster_service import HorizontalPodAutoscaling -from google.container_v1beta1.types.cluster_service import HttpLoadBalancing -from google.container_v1beta1.types.cluster_service import IntraNodeVisibilityConfig -from google.container_v1beta1.types.cluster_service import IPAllocationPolicy -from google.container_v1beta1.types.cluster_service import IstioConfig -from google.container_v1beta1.types.cluster_service import Jwk -from google.container_v1beta1.types.cluster_service import KalmConfig -from google.container_v1beta1.types.cluster_service import 
KubernetesDashboard -from google.container_v1beta1.types.cluster_service import LegacyAbac -from google.container_v1beta1.types.cluster_service import LinuxNodeConfig -from google.container_v1beta1.types.cluster_service import ListClustersRequest -from google.container_v1beta1.types.cluster_service import ListClustersResponse -from google.container_v1beta1.types.cluster_service import ListLocationsRequest -from google.container_v1beta1.types.cluster_service import ListLocationsResponse -from google.container_v1beta1.types.cluster_service import ListNodePoolsRequest -from google.container_v1beta1.types.cluster_service import ListNodePoolsResponse -from google.container_v1beta1.types.cluster_service import ListOperationsRequest -from google.container_v1beta1.types.cluster_service import ListOperationsResponse -from google.container_v1beta1.types.cluster_service import ListUsableSubnetworksRequest -from google.container_v1beta1.types.cluster_service import ListUsableSubnetworksResponse -from google.container_v1beta1.types.cluster_service import Location -from google.container_v1beta1.types.cluster_service import MaintenancePolicy -from google.container_v1beta1.types.cluster_service import MaintenanceWindow -from google.container_v1beta1.types.cluster_service import Master -from google.container_v1beta1.types.cluster_service import MasterAuth -from google.container_v1beta1.types.cluster_service import MasterAuthorizedNetworksConfig -from google.container_v1beta1.types.cluster_service import MaxPodsConstraint -from google.container_v1beta1.types.cluster_service import NetworkConfig -from google.container_v1beta1.types.cluster_service import NetworkPolicy -from google.container_v1beta1.types.cluster_service import NetworkPolicyConfig -from google.container_v1beta1.types.cluster_service import NodeConfig -from google.container_v1beta1.types.cluster_service import NodeKubeletConfig -from google.container_v1beta1.types.cluster_service import NodeManagement -from google.container_v1beta1.types.cluster_service import NodePool -from google.container_v1beta1.types.cluster_service import NodePoolAutoscaling -from google.container_v1beta1.types.cluster_service import NodeTaint -from google.container_v1beta1.types.cluster_service import NotificationConfig -from google.container_v1beta1.types.cluster_service import Operation -from google.container_v1beta1.types.cluster_service import OperationProgress -from google.container_v1beta1.types.cluster_service import PodSecurityPolicyConfig -from google.container_v1beta1.types.cluster_service import PrivateClusterConfig -from google.container_v1beta1.types.cluster_service import PrivateClusterMasterGlobalAccessConfig -from google.container_v1beta1.types.cluster_service import RecurringTimeWindow -from google.container_v1beta1.types.cluster_service import ReleaseChannel -from google.container_v1beta1.types.cluster_service import ReservationAffinity -from google.container_v1beta1.types.cluster_service import ResourceLimit -from google.container_v1beta1.types.cluster_service import ResourceUsageExportConfig -from google.container_v1beta1.types.cluster_service import RollbackNodePoolUpgradeRequest -from google.container_v1beta1.types.cluster_service import SandboxConfig -from google.container_v1beta1.types.cluster_service import ServerConfig -from google.container_v1beta1.types.cluster_service import SetAddonsConfigRequest -from google.container_v1beta1.types.cluster_service import SetLabelsRequest -from google.container_v1beta1.types.cluster_service import 
SetLegacyAbacRequest -from google.container_v1beta1.types.cluster_service import SetLocationsRequest -from google.container_v1beta1.types.cluster_service import SetLoggingServiceRequest -from google.container_v1beta1.types.cluster_service import SetMaintenancePolicyRequest -from google.container_v1beta1.types.cluster_service import SetMasterAuthRequest -from google.container_v1beta1.types.cluster_service import SetMonitoringServiceRequest -from google.container_v1beta1.types.cluster_service import SetNetworkPolicyRequest -from google.container_v1beta1.types.cluster_service import SetNodePoolAutoscalingRequest -from google.container_v1beta1.types.cluster_service import SetNodePoolManagementRequest -from google.container_v1beta1.types.cluster_service import SetNodePoolSizeRequest -from google.container_v1beta1.types.cluster_service import ShieldedInstanceConfig -from google.container_v1beta1.types.cluster_service import ShieldedNodes -from google.container_v1beta1.types.cluster_service import StartIPRotationRequest -from google.container_v1beta1.types.cluster_service import StatusCondition -from google.container_v1beta1.types.cluster_service import TimeWindow -from google.container_v1beta1.types.cluster_service import TpuConfig -from google.container_v1beta1.types.cluster_service import UpdateClusterRequest -from google.container_v1beta1.types.cluster_service import UpdateMasterRequest -from google.container_v1beta1.types.cluster_service import UpdateNodePoolRequest -from google.container_v1beta1.types.cluster_service import UpgradeEvent -from google.container_v1beta1.types.cluster_service import UsableSubnetwork -from google.container_v1beta1.types.cluster_service import UsableSubnetworkSecondaryRange -from google.container_v1beta1.types.cluster_service import VerticalPodAutoscaling -from google.container_v1beta1.types.cluster_service import WorkloadIdentityConfig -from google.container_v1beta1.types.cluster_service import WorkloadMetadataConfig -from google.container_v1beta1.types.cluster_service import DatapathProvider -from google.container_v1beta1.types.cluster_service import UpgradeResourceType - -__all__ = ('ClusterManagerClient', - 'ClusterManagerAsyncClient', - 'AcceleratorConfig', - 'AddonsConfig', - 'AuthenticatorGroupsConfig', - 'AutoprovisioningNodePoolDefaults', - 'AutoUpgradeOptions', - 'BinaryAuthorization', - 'CancelOperationRequest', - 'ClientCertificateConfig', - 'CloudRunConfig', - 'Cluster', - 'ClusterAutoscaling', - 'ClusterTelemetry', - 'ClusterUpdate', - 'CompleteIPRotationRequest', - 'ConfidentialNodes', - 'ConfigConnectorConfig', - 'CreateClusterRequest', - 'CreateNodePoolRequest', - 'DailyMaintenanceWindow', - 'DatabaseEncryption', - 'DefaultSnatStatus', - 'DeleteClusterRequest', - 'DeleteNodePoolRequest', - 'DnsCacheConfig', - 'EphemeralStorageConfig', - 'GcePersistentDiskCsiDriverConfig', - 'GetClusterRequest', - 'GetJSONWebKeysRequest', - 'GetJSONWebKeysResponse', - 'GetNodePoolRequest', - 'GetOpenIDConfigRequest', - 'GetOpenIDConfigResponse', - 'GetOperationRequest', - 'GetServerConfigRequest', - 'HorizontalPodAutoscaling', - 'HttpLoadBalancing', - 'IntraNodeVisibilityConfig', - 'IPAllocationPolicy', - 'IstioConfig', - 'Jwk', - 'KalmConfig', - 'KubernetesDashboard', - 'LegacyAbac', - 'LinuxNodeConfig', - 'ListClustersRequest', - 'ListClustersResponse', - 'ListLocationsRequest', - 'ListLocationsResponse', - 'ListNodePoolsRequest', - 'ListNodePoolsResponse', - 'ListOperationsRequest', - 'ListOperationsResponse', - 'ListUsableSubnetworksRequest', - 
'ListUsableSubnetworksResponse', - 'Location', - 'MaintenancePolicy', - 'MaintenanceWindow', - 'Master', - 'MasterAuth', - 'MasterAuthorizedNetworksConfig', - 'MaxPodsConstraint', - 'NetworkConfig', - 'NetworkPolicy', - 'NetworkPolicyConfig', - 'NodeConfig', - 'NodeKubeletConfig', - 'NodeManagement', - 'NodePool', - 'NodePoolAutoscaling', - 'NodeTaint', - 'NotificationConfig', - 'Operation', - 'OperationProgress', - 'PodSecurityPolicyConfig', - 'PrivateClusterConfig', - 'PrivateClusterMasterGlobalAccessConfig', - 'RecurringTimeWindow', - 'ReleaseChannel', - 'ReservationAffinity', - 'ResourceLimit', - 'ResourceUsageExportConfig', - 'RollbackNodePoolUpgradeRequest', - 'SandboxConfig', - 'ServerConfig', - 'SetAddonsConfigRequest', - 'SetLabelsRequest', - 'SetLegacyAbacRequest', - 'SetLocationsRequest', - 'SetLoggingServiceRequest', - 'SetMaintenancePolicyRequest', - 'SetMasterAuthRequest', - 'SetMonitoringServiceRequest', - 'SetNetworkPolicyRequest', - 'SetNodePoolAutoscalingRequest', - 'SetNodePoolManagementRequest', - 'SetNodePoolSizeRequest', - 'ShieldedInstanceConfig', - 'ShieldedNodes', - 'StartIPRotationRequest', - 'StatusCondition', - 'TimeWindow', - 'TpuConfig', - 'UpdateClusterRequest', - 'UpdateMasterRequest', - 'UpdateNodePoolRequest', - 'UpgradeEvent', - 'UsableSubnetwork', - 'UsableSubnetworkSecondaryRange', - 'VerticalPodAutoscaling', - 'WorkloadIdentityConfig', - 'WorkloadMetadataConfig', - 'DatapathProvider', - 'UpgradeResourceType', -) diff --git a/owl-bot-staging/v1beta1/google/container/py.typed b/owl-bot-staging/v1beta1/google/container/py.typed deleted file mode 100644 index fd835114..00000000 --- a/owl-bot-staging/v1beta1/google/container/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-container package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py deleted file mode 100644 index 81480c82..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/__init__.py +++ /dev/null @@ -1,250 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from .services.cluster_manager import ClusterManagerClient -from .services.cluster_manager import ClusterManagerAsyncClient - -from .types.cluster_service import AcceleratorConfig -from .types.cluster_service import AddonsConfig -from .types.cluster_service import AuthenticatorGroupsConfig -from .types.cluster_service import AutoprovisioningNodePoolDefaults -from .types.cluster_service import AutoUpgradeOptions -from .types.cluster_service import BinaryAuthorization -from .types.cluster_service import CancelOperationRequest -from .types.cluster_service import ClientCertificateConfig -from .types.cluster_service import CloudRunConfig -from .types.cluster_service import Cluster -from .types.cluster_service import ClusterAutoscaling -from .types.cluster_service import ClusterTelemetry -from .types.cluster_service import ClusterUpdate -from .types.cluster_service import CompleteIPRotationRequest -from .types.cluster_service import ConfidentialNodes -from .types.cluster_service import ConfigConnectorConfig -from .types.cluster_service import CreateClusterRequest -from .types.cluster_service import CreateNodePoolRequest -from .types.cluster_service import DailyMaintenanceWindow -from .types.cluster_service import DatabaseEncryption -from .types.cluster_service import DefaultSnatStatus -from .types.cluster_service import DeleteClusterRequest -from .types.cluster_service import DeleteNodePoolRequest -from .types.cluster_service import DnsCacheConfig -from .types.cluster_service import EphemeralStorageConfig -from .types.cluster_service import GcePersistentDiskCsiDriverConfig -from .types.cluster_service import GetClusterRequest -from .types.cluster_service import GetJSONWebKeysRequest -from .types.cluster_service import GetJSONWebKeysResponse -from .types.cluster_service import GetNodePoolRequest -from .types.cluster_service import GetOpenIDConfigRequest -from .types.cluster_service import GetOpenIDConfigResponse -from .types.cluster_service import GetOperationRequest -from .types.cluster_service import GetServerConfigRequest -from .types.cluster_service import HorizontalPodAutoscaling -from .types.cluster_service import HttpLoadBalancing -from .types.cluster_service import IntraNodeVisibilityConfig -from .types.cluster_service import IPAllocationPolicy -from .types.cluster_service import IstioConfig -from .types.cluster_service import Jwk -from .types.cluster_service import KalmConfig -from .types.cluster_service import KubernetesDashboard -from .types.cluster_service import LegacyAbac -from .types.cluster_service import LinuxNodeConfig -from .types.cluster_service import ListClustersRequest -from .types.cluster_service import ListClustersResponse -from .types.cluster_service import ListLocationsRequest -from .types.cluster_service import ListLocationsResponse -from .types.cluster_service import ListNodePoolsRequest -from .types.cluster_service import ListNodePoolsResponse -from .types.cluster_service import ListOperationsRequest -from .types.cluster_service import ListOperationsResponse -from .types.cluster_service import ListUsableSubnetworksRequest -from .types.cluster_service import ListUsableSubnetworksResponse -from .types.cluster_service import Location -from .types.cluster_service import MaintenancePolicy -from .types.cluster_service import MaintenanceWindow -from .types.cluster_service import Master -from .types.cluster_service import MasterAuth -from .types.cluster_service import MasterAuthorizedNetworksConfig -from .types.cluster_service import MaxPodsConstraint -from 
.types.cluster_service import NetworkConfig -from .types.cluster_service import NetworkPolicy -from .types.cluster_service import NetworkPolicyConfig -from .types.cluster_service import NodeConfig -from .types.cluster_service import NodeKubeletConfig -from .types.cluster_service import NodeManagement -from .types.cluster_service import NodePool -from .types.cluster_service import NodePoolAutoscaling -from .types.cluster_service import NodeTaint -from .types.cluster_service import NotificationConfig -from .types.cluster_service import Operation -from .types.cluster_service import OperationProgress -from .types.cluster_service import PodSecurityPolicyConfig -from .types.cluster_service import PrivateClusterConfig -from .types.cluster_service import PrivateClusterMasterGlobalAccessConfig -from .types.cluster_service import RecurringTimeWindow -from .types.cluster_service import ReleaseChannel -from .types.cluster_service import ReservationAffinity -from .types.cluster_service import ResourceLimit -from .types.cluster_service import ResourceUsageExportConfig -from .types.cluster_service import RollbackNodePoolUpgradeRequest -from .types.cluster_service import SandboxConfig -from .types.cluster_service import ServerConfig -from .types.cluster_service import SetAddonsConfigRequest -from .types.cluster_service import SetLabelsRequest -from .types.cluster_service import SetLegacyAbacRequest -from .types.cluster_service import SetLocationsRequest -from .types.cluster_service import SetLoggingServiceRequest -from .types.cluster_service import SetMaintenancePolicyRequest -from .types.cluster_service import SetMasterAuthRequest -from .types.cluster_service import SetMonitoringServiceRequest -from .types.cluster_service import SetNetworkPolicyRequest -from .types.cluster_service import SetNodePoolAutoscalingRequest -from .types.cluster_service import SetNodePoolManagementRequest -from .types.cluster_service import SetNodePoolSizeRequest -from .types.cluster_service import ShieldedInstanceConfig -from .types.cluster_service import ShieldedNodes -from .types.cluster_service import StartIPRotationRequest -from .types.cluster_service import StatusCondition -from .types.cluster_service import TimeWindow -from .types.cluster_service import TpuConfig -from .types.cluster_service import UpdateClusterRequest -from .types.cluster_service import UpdateMasterRequest -from .types.cluster_service import UpdateNodePoolRequest -from .types.cluster_service import UpgradeEvent -from .types.cluster_service import UsableSubnetwork -from .types.cluster_service import UsableSubnetworkSecondaryRange -from .types.cluster_service import VerticalPodAutoscaling -from .types.cluster_service import WorkloadIdentityConfig -from .types.cluster_service import WorkloadMetadataConfig -from .types.cluster_service import DatapathProvider -from .types.cluster_service import UpgradeResourceType - -__all__ = ( - 'ClusterManagerAsyncClient', -'AcceleratorConfig', -'AddonsConfig', -'AuthenticatorGroupsConfig', -'AutoUpgradeOptions', -'AutoprovisioningNodePoolDefaults', -'BinaryAuthorization', -'CancelOperationRequest', -'ClientCertificateConfig', -'CloudRunConfig', -'Cluster', -'ClusterAutoscaling', -'ClusterManagerClient', -'ClusterTelemetry', -'ClusterUpdate', -'CompleteIPRotationRequest', -'ConfidentialNodes', -'ConfigConnectorConfig', -'CreateClusterRequest', -'CreateNodePoolRequest', -'DailyMaintenanceWindow', -'DatabaseEncryption', -'DatapathProvider', -'DefaultSnatStatus', -'DeleteClusterRequest', -'DeleteNodePoolRequest', 
-'DnsCacheConfig', -'EphemeralStorageConfig', -'GcePersistentDiskCsiDriverConfig', -'GetClusterRequest', -'GetJSONWebKeysRequest', -'GetJSONWebKeysResponse', -'GetNodePoolRequest', -'GetOpenIDConfigRequest', -'GetOpenIDConfigResponse', -'GetOperationRequest', -'GetServerConfigRequest', -'HorizontalPodAutoscaling', -'HttpLoadBalancing', -'IPAllocationPolicy', -'IntraNodeVisibilityConfig', -'IstioConfig', -'Jwk', -'KalmConfig', -'KubernetesDashboard', -'LegacyAbac', -'LinuxNodeConfig', -'ListClustersRequest', -'ListClustersResponse', -'ListLocationsRequest', -'ListLocationsResponse', -'ListNodePoolsRequest', -'ListNodePoolsResponse', -'ListOperationsRequest', -'ListOperationsResponse', -'ListUsableSubnetworksRequest', -'ListUsableSubnetworksResponse', -'Location', -'MaintenancePolicy', -'MaintenanceWindow', -'Master', -'MasterAuth', -'MasterAuthorizedNetworksConfig', -'MaxPodsConstraint', -'NetworkConfig', -'NetworkPolicy', -'NetworkPolicyConfig', -'NodeConfig', -'NodeKubeletConfig', -'NodeManagement', -'NodePool', -'NodePoolAutoscaling', -'NodeTaint', -'NotificationConfig', -'Operation', -'OperationProgress', -'PodSecurityPolicyConfig', -'PrivateClusterConfig', -'PrivateClusterMasterGlobalAccessConfig', -'RecurringTimeWindow', -'ReleaseChannel', -'ReservationAffinity', -'ResourceLimit', -'ResourceUsageExportConfig', -'RollbackNodePoolUpgradeRequest', -'SandboxConfig', -'ServerConfig', -'SetAddonsConfigRequest', -'SetLabelsRequest', -'SetLegacyAbacRequest', -'SetLocationsRequest', -'SetLoggingServiceRequest', -'SetMaintenancePolicyRequest', -'SetMasterAuthRequest', -'SetMonitoringServiceRequest', -'SetNetworkPolicyRequest', -'SetNodePoolAutoscalingRequest', -'SetNodePoolManagementRequest', -'SetNodePoolSizeRequest', -'ShieldedInstanceConfig', -'ShieldedNodes', -'StartIPRotationRequest', -'StatusCondition', -'TimeWindow', -'TpuConfig', -'UpdateClusterRequest', -'UpdateMasterRequest', -'UpdateNodePoolRequest', -'UpgradeEvent', -'UpgradeResourceType', -'UsableSubnetwork', -'UsableSubnetworkSecondaryRange', -'VerticalPodAutoscaling', -'WorkloadIdentityConfig', -'WorkloadMetadataConfig', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json b/owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json deleted file mode 100644 index 0ff8e5d5..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/gapic_metadata.json +++ /dev/null @@ -1,353 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.container_v1beta1", - "protoPackage": "google.container.v1beta1", - "schema": "1.0", - "services": { - "ClusterManager": { - "clients": { - "grpc": { - "libraryClient": "ClusterManagerClient", - "rpcs": { - "CancelOperation": { - "methods": [ - "cancel_operation" - ] - }, - "CompleteIPRotation": { - "methods": [ - "complete_ip_rotation" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateNodePool": { - "methods": [ - "create_node_pool" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteNodePool": { - "methods": [ - "delete_node_pool" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetJSONWebKeys": { - "methods": [ - "get_json_web_keys" - ] - }, - "GetNodePool": { - "methods": [ - "get_node_pool" - ] - }, - "GetOperation": { - "methods": [ - "get_operation" - ] - }, - "GetServerConfig": { - "methods": [ - "get_server_config" - ] - }, - "ListClusters": { - "methods": [ - 
"list_clusters" - ] - }, - "ListLocations": { - "methods": [ - "list_locations" - ] - }, - "ListNodePools": { - "methods": [ - "list_node_pools" - ] - }, - "ListOperations": { - "methods": [ - "list_operations" - ] - }, - "ListUsableSubnetworks": { - "methods": [ - "list_usable_subnetworks" - ] - }, - "RollbackNodePoolUpgrade": { - "methods": [ - "rollback_node_pool_upgrade" - ] - }, - "SetAddonsConfig": { - "methods": [ - "set_addons_config" - ] - }, - "SetLabels": { - "methods": [ - "set_labels" - ] - }, - "SetLegacyAbac": { - "methods": [ - "set_legacy_abac" - ] - }, - "SetLocations": { - "methods": [ - "set_locations" - ] - }, - "SetLoggingService": { - "methods": [ - "set_logging_service" - ] - }, - "SetMaintenancePolicy": { - "methods": [ - "set_maintenance_policy" - ] - }, - "SetMasterAuth": { - "methods": [ - "set_master_auth" - ] - }, - "SetMonitoringService": { - "methods": [ - "set_monitoring_service" - ] - }, - "SetNetworkPolicy": { - "methods": [ - "set_network_policy" - ] - }, - "SetNodePoolAutoscaling": { - "methods": [ - "set_node_pool_autoscaling" - ] - }, - "SetNodePoolManagement": { - "methods": [ - "set_node_pool_management" - ] - }, - "SetNodePoolSize": { - "methods": [ - "set_node_pool_size" - ] - }, - "StartIPRotation": { - "methods": [ - "start_ip_rotation" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateMaster": { - "methods": [ - "update_master" - ] - }, - "UpdateNodePool": { - "methods": [ - "update_node_pool" - ] - } - } - }, - "grpc-async": { - "libraryClient": "ClusterManagerAsyncClient", - "rpcs": { - "CancelOperation": { - "methods": [ - "cancel_operation" - ] - }, - "CompleteIPRotation": { - "methods": [ - "complete_ip_rotation" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateNodePool": { - "methods": [ - "create_node_pool" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteNodePool": { - "methods": [ - "delete_node_pool" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetJSONWebKeys": { - "methods": [ - "get_json_web_keys" - ] - }, - "GetNodePool": { - "methods": [ - "get_node_pool" - ] - }, - "GetOperation": { - "methods": [ - "get_operation" - ] - }, - "GetServerConfig": { - "methods": [ - "get_server_config" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListLocations": { - "methods": [ - "list_locations" - ] - }, - "ListNodePools": { - "methods": [ - "list_node_pools" - ] - }, - "ListOperations": { - "methods": [ - "list_operations" - ] - }, - "ListUsableSubnetworks": { - "methods": [ - "list_usable_subnetworks" - ] - }, - "RollbackNodePoolUpgrade": { - "methods": [ - "rollback_node_pool_upgrade" - ] - }, - "SetAddonsConfig": { - "methods": [ - "set_addons_config" - ] - }, - "SetLabels": { - "methods": [ - "set_labels" - ] - }, - "SetLegacyAbac": { - "methods": [ - "set_legacy_abac" - ] - }, - "SetLocations": { - "methods": [ - "set_locations" - ] - }, - "SetLoggingService": { - "methods": [ - "set_logging_service" - ] - }, - "SetMaintenancePolicy": { - "methods": [ - "set_maintenance_policy" - ] - }, - "SetMasterAuth": { - "methods": [ - "set_master_auth" - ] - }, - "SetMonitoringService": { - "methods": [ - "set_monitoring_service" - ] - }, - "SetNetworkPolicy": { - "methods": [ - "set_network_policy" - ] - }, - "SetNodePoolAutoscaling": { - "methods": [ - "set_node_pool_autoscaling" - ] - }, - "SetNodePoolManagement": { - "methods": [ - "set_node_pool_management" - ] - }, - "SetNodePoolSize": { - 
"methods": [ - "set_node_pool_size" - ] - }, - "StartIPRotation": { - "methods": [ - "start_ip_rotation" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateMaster": { - "methods": [ - "update_master" - ] - }, - "UpdateNodePool": { - "methods": [ - "update_node_pool" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/py.typed b/owl-bot-staging/v1beta1/google/container_v1beta1/py.typed deleted file mode 100644 index fd835114..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-container package uses inline types. diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py deleted file mode 100644 index 4de65971..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py deleted file mode 100644 index 490efad3..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import ClusterManagerClient -from .async_client import ClusterManagerAsyncClient - -__all__ = ( - 'ClusterManagerClient', - 'ClusterManagerAsyncClient', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py deleted file mode 100644 index 14848aa7..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/async_client.py +++ /dev/null @@ -1,3632 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Sequence, Tuple, Type, Union -import pkg_resources -import warnings - -import google.api_core.client_options as ClientOptions # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.container_v1beta1.services.cluster_manager import pagers -from google.container_v1beta1.types import cluster_service -from google.rpc import status_pb2 # type: ignore -from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport -from .client import ClusterManagerClient - - -class ClusterManagerAsyncClient: - """Google Kubernetes Engine Cluster Manager v1beta1""" - - _client: ClusterManagerClient - - DEFAULT_ENDPOINT = ClusterManagerClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = ClusterManagerClient.DEFAULT_MTLS_ENDPOINT - - topic_path = staticmethod(ClusterManagerClient.topic_path) - parse_topic_path = staticmethod(ClusterManagerClient.parse_topic_path) - common_billing_account_path = staticmethod(ClusterManagerClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(ClusterManagerClient.parse_common_billing_account_path) - common_folder_path = staticmethod(ClusterManagerClient.common_folder_path) - parse_common_folder_path = staticmethod(ClusterManagerClient.parse_common_folder_path) - common_organization_path = staticmethod(ClusterManagerClient.common_organization_path) - parse_common_organization_path = staticmethod(ClusterManagerClient.parse_common_organization_path) - common_project_path = staticmethod(ClusterManagerClient.common_project_path) - parse_common_project_path = staticmethod(ClusterManagerClient.parse_common_project_path) - common_location_path = staticmethod(ClusterManagerClient.common_location_path) - parse_common_location_path = staticmethod(ClusterManagerClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerAsyncClient: The constructed client. - """ - return ClusterManagerClient.from_service_account_info.__func__(ClusterManagerAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. 
- - Returns: - ClusterManagerAsyncClient: The constructed client. - """ - return ClusterManagerClient.from_service_account_file.__func__(ClusterManagerAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterManagerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterManagerTransport: The transport used by the client instance. - """ - return self._client.transport - - get_transport_class = functools.partial(type(ClusterManagerClient).get_transport_class, type(ClusterManagerClient)) - - def __init__(self, *, - credentials: ga_credentials.Credentials = None, - transport: Union[str, ClusterManagerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster manager client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.ClusterManagerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = ClusterManagerClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def list_clusters(self, - request: cluster_service.ListClustersRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListClustersResponse: - r"""Lists all clusters owned by a project in either the - specified zone or all zones. - - Args: - request (:class:`google.container_v1beta1.types.ListClustersRequest`): - The request object. ListClustersRequest lists clusters. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. 
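# --- Editorial sketch: both factory classmethods above return a fully
# constructed client, and from_service_account_json is an alias of
# from_service_account_file per the assignment in the deleted code. Assuming
# the package is installed; key.json is a hypothetical service-account key:
from google.container_v1beta1 import ClusterManagerAsyncClient

client = ClusterManagerAsyncClient()  # ambient (ADC) credentials from the environment
client = ClusterManagerAsyncClient.from_service_account_file("key.json")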
The name of the Google Compute - Engine - `zone `__ - in which the cluster resides, or "-" for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListClustersResponse: - ListClustersResponse is the result of - ListClustersRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListClustersRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_cluster(self, - request: cluster_service.GetClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Cluster: - r"""Gets the details for a specific cluster. - - Args: - request (:class:`google.container_v1beta1.types.GetClusterRequest`): - The request object. GetClusterRequest gets the settings - of a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to retrieve. This field has been - deprecated and replaced by the name - field. 
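# --- Editorial sketch for list_clusters above: the non-deprecated parent form
# replaces the project_id/zone pair, and "-" as the location spans all zones.
# The project ID is hypothetical; dicts coerce to the proto-plus request type.
import asyncio
from google.container_v1beta1 import ClusterManagerAsyncClient

async def main() -> None:
    client = ClusterManagerAsyncClient()
    response = await client.list_clusters(
        request={"parent": "projects/my-project/locations/-"}
    )
    for cluster in response.clusters:
        print(cluster.name, cluster.status)

asyncio.run(main())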
- - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Cluster: - A Google Kubernetes Engine cluster. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_cluster(self, - request: cluster_service.CreateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster: cluster_service.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Args: - request (:class:`google.container_v1beta1.types.CreateClusterRequest`): - The request object. CreateClusterRequest creates a - cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. 
This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.container_v1beta1.types.Cluster`): - Required. A `cluster - resource `__ - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_cluster(self, - request: cluster_service.UpdateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - update: cluster_service.ClusterUpdate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the settings for a specific cluster. - - Args: - request (:class:`google.container_v1beta1.types.UpdateClusterRequest`): - The request object. UpdateClusterRequest updates the - settings of a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to upgrade. 
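# --- Editorial sketch for create_cluster above (project, zone, and cluster
# name hypothetical; initial_node_count is assumed from the public Cluster
# message, not shown in this diff). The nested dict coerces to a Cluster:
async def create_demo_cluster(client) -> None:
    operation = await client.create_cluster(
        request={
            "parent": "projects/my-project/locations/us-central1-a",
            "cluster": {"name": "demo-cluster", "initial_node_count": 1},
        }
    )
    # Operation fields are output only; track progress via get_operation.
    print(operation.name, operation.status)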
This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update (:class:`google.container_v1beta1.types.ClusterUpdate`): - Required. A description of the - update. - - This corresponds to the ``update`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, update]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.UpdateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if update is not None: - request.update = update - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_node_pool(self, - request: cluster_service.UpdateNodePoolRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the version and/or image type of a specific - node pool. - - Args: - request (:class:`google.container_v1beta1.types.UpdateNodePoolRequest`): - The request object. SetNodePoolVersionRequest updates - the version of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.UpdateNodePoolRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
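# --- Editorial sketch: the wrap_method defaults below (retry, timeout) can be
# overridden per call. google.api_core is an actual dependency of the
# generated client; the resource name and field values are hypothetical, and
# node_version/image_type are assumed from the public UpdateNodePoolRequest.
from google.api_core import retry as retries

async def update_pool(client) -> None:
    operation = await client.update_node_pool(
        request={
            "name": "projects/my-project/locations/us-central1-a/"
                    "clusters/demo-cluster/nodePools/default-pool",
            "node_version": "-",            # "-" asks for the default version
            "image_type": "COS_CONTAINERD",
        },
        retry=retries.Retry(initial=0.2, maximum=30.0, multiplier=2.0),
        timeout=60.0,
    )
    print(operation.status)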
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_node_pool, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_node_pool_autoscaling(self, - request: cluster_service.SetNodePoolAutoscalingRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the autoscaling settings of a specific node - pool. - - Args: - request (:class:`google.container_v1beta1.types.SetNodePoolAutoscalingRequest`): - The request object. SetNodePoolAutoscalingRequest sets - the autoscaler settings of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolAutoscalingRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_node_pool_autoscaling, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_logging_service(self, - request: cluster_service.SetLoggingServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - logging_service: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the logging service for a specific cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetLoggingServiceRequest`): - The request object. SetLoggingServiceRequest sets the - logging service of a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. 
The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logging_service (:class:`str`): - Required. The logging service the cluster should use to - write logs. Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud - Logging service with a Kubernetes-native resource - model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``logging.googleapis.com`` for - earlier versions. - - This corresponds to the ``logging_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, logging_service]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLoggingServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if logging_service is not None: - request.logging_service = logging_service - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_logging_service, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_monitoring_service(self, - request: cluster_service.SetMonitoringServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - monitoring_service: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the monitoring service for a specific cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetMonitoringServiceRequest`): - The request object. SetMonitoringServiceRequest sets the - monitoring service of a cluster. - project_id (:class:`str`): - Required. Deprecated. 
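# --- Editorial sketch pairing with the logging_service values listed above
# (resource name hypothetical); set_monitoring_service below takes the same
# shape with monitoring.googleapis.com/kubernetes.
async def enable_native_logging(client) -> None:
    operation = await client.set_logging_service(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/demo-cluster",
            "logging_service": "logging.googleapis.com/kubernetes",
        }
    )
    print(operation.status)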
The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - monitoring_service (:class:`str`): - Required. The monitoring service the cluster should use - to write metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE - 1.15). - - ``none`` - No metrics will be exported from the - cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will - be used for GKE 1.14+ or ``monitoring.googleapis.com`` - for earlier versions. - - This corresponds to the ``monitoring_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, monitoring_service]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetMonitoringServiceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if monitoring_service is not None: - request.monitoring_service = monitoring_service - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_monitoring_service, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def set_addons_config(self, - request: cluster_service.SetAddonsConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - addons_config: cluster_service.AddonsConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the addons for a specific cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetAddonsConfigRequest`): - The request object. SetAddonsRequest sets the addons - associated with the cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - addons_config (:class:`google.container_v1beta1.types.AddonsConfig`): - Required. The desired configurations - for the various addons available to run - in the cluster. - - This corresponds to the ``addons_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, addons_config]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetAddonsConfigRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if addons_config is not None: - request.addons_config = addons_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_addons_config, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
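# --- Editorial sketch for set_addons_config above: the addon sub-messages
# (HttpLoadBalancing, KubernetesDashboard, per the types exported by this
# package) are assumed to carry a `disabled` flag; nested dicts coerce to the
# proto-plus types. Identifiers hypothetical.
async def disable_dashboard(client) -> None:
    operation = await client.set_addons_config(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/demo-cluster",
            "addons_config": {
                "http_load_balancing": {"disabled": False},
                "kubernetes_dashboard": {"disabled": True},
            },
        }
    )
    print(operation.status)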
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_locations(self, - request: cluster_service.SetLocationsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - locations: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the locations for a specific cluster. Deprecated. Use - `projects.locations.clusters.update `__ - instead. - - Args: - request (:class:`google.container_v1beta1.types.SetLocationsRequest`): - The request object. SetLocationsRequest sets the - locations of the cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - locations (:class:`Sequence[str]`): - Required. The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. Changing - the locations a cluster is in will result in nodes being - either created or removed from the cluster, depending on - whether locations are being added or removed. - - This list must always include the cluster's primary - zone. - - This corresponds to the ``locations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - warnings.warn("ClusterManagerAsyncClient.set_locations is deprecated", - DeprecationWarning) - - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, locations]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLocationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if locations: - request.locations.extend(locations) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_locations, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_master(self, - request: cluster_service.UpdateMasterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - master_version: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the master for a specific cluster. - - Args: - request (:class:`google.container_v1beta1.types.UpdateMasterRequest`): - The request object. UpdateMasterRequest updates the - master of the cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - master_version (:class:`str`): - Required. The Kubernetes version to - change the master to. - Users may specify either explicit - versions offered by Kubernetes Engine or - version aliases, which have the - following behavior: - - "latest": picks the highest valid - Kubernetes version - "1.X": picks the - highest valid patch+gke.N patch in the - 1.X version - "1.X.Y": picks the highest - valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the - default Kubernetes version - - This corresponds to the ``master_version`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. 
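# --- Editorial sketch of the master_version aliases documented above: "-"
# defers to the server default, while "1.X"/"1.X.Y" select the newest matching
# patch release. Resource name hypothetical.
async def upgrade_master(client) -> None:
    operation = await client.update_master(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/demo-cluster",
            "master_version": "-",
        }
    )
    print(operation.status)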
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, master_version]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.UpdateMasterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if master_version is not None: - request.master_version = master_version - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_master, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_master_auth(self, - request: cluster_service.SetMasterAuthRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets master auth materials. Currently supports - changing the admin password or a specific cluster, - either via password generation or explicitly setting the - password. - - Args: - request (:class:`google.container_v1beta1.types.SetMasterAuthRequest`): - The request object. SetMasterAuthRequest updates the - admin password of a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetMasterAuthRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_master_auth, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_cluster(self, - request: cluster_service.DeleteClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted. - - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Args: - request (:class:`google.container_v1beta1.types.DeleteClusterRequest`): - The request object. DeleteClusterRequest deletes a - cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.DeleteClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
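# --- Editorial sketch for delete_cluster above: unlike most of the mutations
# in this file, it is wrapped with a default Retry on DeadlineExceeded and
# ServiceUnavailable, so a bare call already tolerates transient outages.
# Resource name hypothetical.
async def delete_demo_cluster(client) -> None:
    operation = await client.delete_cluster(
        request={
            "name": "projects/my-project/locations/us-central1-a/clusters/demo-cluster"
        }
    )
    print(operation.status)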
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_operations(self, - request: cluster_service.ListOperationsRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListOperationsResponse: - r"""Lists all operations in a project in the specified - zone or all zones. - - Args: - request (:class:`google.container_v1beta1.types.ListOperationsRequest`): - The request object. ListOperationsRequest lists - operations. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - to return operations for, or ``-`` for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListOperationsResponse: - ListOperationsResponse is the result - of ListOperationsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListOperationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_operations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def get_operation(self, - request: cluster_service.GetOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Gets the specified operation. - - Args: - request (:class:`google.container_v1beta1.types.GetOperationRequest`): - The request object. GetOperationRequest gets a single - operation. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (:class:`str`): - Required. Deprecated. The server-assigned ``name`` of - the operation. This field has been deprecated and - replaced by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetOperationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
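A hedged sketch of get_operation, e.g. for polling an operation returned by one of the mutating calls above; the name format shown is the one implied by the docstring's name field.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import GetOperationRequest

    async def poll_operation(operation_name: str):
        # operation_name is expected to look like
        # "projects/<project>/locations/<location>/operations/<operation>".
        client = ClusterManagerAsyncClient()
        request = GetOperationRequest(name=operation_name)
        operation = await client.get_operation(request=request)
        return operation.status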
- return response - - async def cancel_operation(self, - request: cluster_service.CancelOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels the specified operation. - - Args: - request (:class:`google.container_v1beta1.types.CancelOperationRequest`): - The request object. CancelOperationRequest cancels a - single operation. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the operation resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (:class:`str`): - Required. Deprecated. The server-assigned ``name`` of - the operation. This field has been deprecated and - replaced by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CancelOperationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.cancel_operation, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def get_server_config(self, - request: cluster_service.GetServerConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ServerConfig: - r"""Returns configuration info about the Google - Kubernetes Engine service. - - Args: - request (:class:`google.container_v1beta1.types.GetServerConfigRequest`): - The request object. 
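For cancel_operation, note that the method is annotated ``-> None``, so there is no response to inspect; a minimal sketch:

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import CancelOperationRequest

    async def cancel(operation_name: str):
        client = ClusterManagerAsyncClient()
        request = CancelOperationRequest(name=operation_name)
        # cancel_operation returns None; failures surface as exceptions.
        await client.cancel_operation(request=request)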
Gets the current Kubernetes Engine - service configuration. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - to return operations for. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ServerConfig: - Kubernetes Engine service - configuration. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetServerConfigRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_server_config, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_node_pools(self, - request: cluster_service.ListNodePoolsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListNodePoolsResponse: - r"""Lists the node pools for a cluster. - - Args: - request (:class:`google.container_v1beta1.types.ListNodePoolsRequest`): - The request object. ListNodePoolsRequest lists the node - pool(s) for a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. 
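A sketch of get_server_config; the ``.../serverConfig`` name format is an assumption based on the GKE resource naming convention, and the project and location are placeholders.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import GetServerConfigRequest

    async def show_server_config():
        client = ClusterManagerAsyncClient()
        request = GetServerConfigRequest(
            name="projects/my-project/locations/us-central1-a/serverConfig",  # assumed format
        )
        config = await client.get_server_config(request=request)
        print(config.default_cluster_version)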
The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListNodePoolsResponse: - ListNodePoolsResponse is the result - of ListNodePoolsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListNodePoolsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_node_pools, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_json_web_keys(self, - request: cluster_service.GetJSONWebKeysRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.GetJSONWebKeysResponse: - r"""Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Args: - request (:class:`google.container_v1beta1.types.GetJSONWebKeysRequest`): - The request object. GetJSONWebKeysRequest gets the - public component of the keys used by the cluster to sign - token requests. This will be the jwks_uri for the - discovery document returned by getOpenIDConfig. See the - OpenID Connect Discovery 1.0 specification for details. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request.
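A sketch of list_node_pools, again using the non-deprecated parent field; the resource path is a hypothetical example.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import ListNodePoolsRequest

    async def show_node_pools():
        client = ClusterManagerAsyncClient()
        request = ListNodePoolsRequest(
            parent="projects/my-project/locations/us-central1-a/clusters/my-cluster",
        )
        response = await client.list_node_pools(request=request)
        for pool in response.node_pools:
            print(pool.name, pool.status)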
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.GetJSONWebKeysResponse: - GetJSONWebKeysResponse is a valid - JSON Web Key Set as specified in RFC - 7517 - - """ - # Create or coerce a protobuf request object. - request = cluster_service.GetJSONWebKeysRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_json_web_keys, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_node_pool(self, - request: cluster_service.GetNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.NodePool: - r"""Retrieves the requested node pool. - - Args: - request (:class:`google.container_v1beta1.types.GetNodePoolRequest`): - The request object. GetNodePoolRequest retrieves a node - pool for a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Required. Deprecated. The name of the - node pool. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.NodePool: - NodePool contains the name and - configuration for a cluster's node pool. - Node pools are a set of nodes (i.e., - VMs), with a common configuration and - specification, under the control of the - cluster master. They may have a set of - Kubernetes labels applied to them, which - may be used to reference them during pod - scheduling. They may also be resized up - or down, to accommodate the workload. - - """ - # Create or coerce a protobuf request object.
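A sketch of get_json_web_keys; the kid and alg attributes on each returned key are assumptions based on the RFC 7517 JWK fields, and the parent path is a placeholder.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import GetJSONWebKeysRequest

    async def show_jwks():
        client = ClusterManagerAsyncClient()
        request = GetJSONWebKeysRequest(
            parent="projects/my-project/locations/us-central1-a/clusters/my-cluster",
        )
        response = await client.get_json_web_keys(request=request)
        # Each entry is a JSON Web Key as described by RFC 7517.
        for key in response.keys:
            print(key.kid, key.alg)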
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.GetNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_node_pool, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_node_pool(self, - request: cluster_service.CreateNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool: cluster_service.NodePool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a node pool for a cluster. - - Args: - request (:class:`google.container_v1beta1.types.CreateNodePoolRequest`): - The request object. CreateNodePoolRequest creates a node - pool for a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool (:class:`google.container_v1beta1.types.NodePool`): - Required. The node pool to create. - This corresponds to the ``node_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
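A sketch of get_node_pool using the name field; the node pool path segment (nodePools/default-pool) is a hypothetical example.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import GetNodePoolRequest

    async def show_node_pool():
        client = ClusterManagerAsyncClient()
        request = GetNodePoolRequest(
            name="projects/my-project/locations/us-central1-a/clusters/my-cluster/nodePools/default-pool",
        )
        pool = await client.get_node_pool(request=request)
        print(pool.version, pool.initial_node_count)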
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CreateNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool is not None: - request.node_pool = node_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_node_pool, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_node_pool(self, - request: cluster_service.DeleteNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes a node pool from a cluster. - - Args: - request (:class:`google.container_v1beta1.types.DeleteNodePoolRequest`): - The request object. DeleteNodePoolRequest deletes a node - pool for a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Required. Deprecated. The name of the - node pool to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
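A sketch of create_node_pool; the NodePool message shown is a minimal assumed configuration (name plus initial node count), and all resource names are placeholders.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import CreateNodePoolRequest, NodePool

    async def add_node_pool():
        client = ClusterManagerAsyncClient()
        request = CreateNodePoolRequest(
            parent="projects/my-project/locations/us-central1-a/clusters/my-cluster",
            node_pool=NodePool(name="pool-1", initial_node_count=3),
        )
        operation = await client.create_node_pool(request=request)
        return operation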
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.DeleteNodePoolRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_node_pool, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def rollback_node_pool_upgrade(self, - request: cluster_service.RollbackNodePoolUpgradeRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Args: - request (:class:`google.container_v1beta1.types.RollbackNodePoolUpgradeRequest`): - The request object. RollbackNodePoolUpgradeRequest - rolls back the previously Aborted or Failed NodePool - upgrade. This will be a no-op if the last upgrade - successfully completed. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set.
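A sketch of delete_node_pool, mirroring the get_node_pool example above; the node pool path is again a placeholder.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import DeleteNodePoolRequest

    async def remove_node_pool():
        client = ClusterManagerAsyncClient()
        request = DeleteNodePoolRequest(
            name="projects/my-project/locations/us-central1-a/clusters/my-cluster/nodePools/pool-1",
        )
        operation = await client.delete_node_pool(request=request)
        return operation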
- cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to rollback. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Required. Deprecated. The name of the - node pool to rollback. This field has - been deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.RollbackNodePoolUpgradeRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.rollback_node_pool_upgrade, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_node_pool_management(self, - request: cluster_service.SetNodePoolManagementRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - management: cluster_service.NodeManagement = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the NodeManagement options for a node pool. - - Args: - request (:class:`google.container_v1beta1.types.SetNodePoolManagementRequest`): - The request object. SetNodePoolManagementRequest sets - the node management properties of a node pool. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. 
The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (:class:`str`): - Required. Deprecated. The name of the - node pool to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - management (:class:`google.container_v1beta1.types.NodeManagement`): - Required. NodeManagement - configuration for the node pool. - - This corresponds to the ``management`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, management]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetNodePoolManagementRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if management is not None: - request.management = management - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_node_pool_management, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
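A sketch of set_node_pool_management; the NodeManagement flags shown (auto_repair, auto_upgrade) are the fields that message carries, and the resource path is hypothetical.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import NodeManagement, SetNodePoolManagementRequest

    async def enable_auto_repair():
        client = ClusterManagerAsyncClient()
        request = SetNodePoolManagementRequest(
            name="projects/my-project/locations/us-central1-a/clusters/my-cluster/nodePools/pool-1",
            management=NodeManagement(auto_repair=True, auto_upgrade=True),
        )
        operation = await client.set_node_pool_management(request=request)
        return operation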
- return response - - async def set_labels(self, - request: cluster_service.SetLabelsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - resource_labels: Sequence[cluster_service.SetLabelsRequest.ResourceLabelsEntry] = None, - label_fingerprint: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets labels on a cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetLabelsRequest`): - The request object. SetLabelsRequest sets the Google - Cloud Platform labels on a Google Container Engine - cluster, which will in turn set them for Google Compute - Engine resources used by that cluster - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - resource_labels (:class:`Sequence[google.container_v1beta1.types.SetLabelsRequest.ResourceLabelsEntry]`): - Required. The labels to set for that - cluster. - - This corresponds to the ``resource_labels`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - label_fingerprint (:class:`str`): - Required. The fingerprint of the previous set of labels - for this resource, used to detect conflicts. The - fingerprint is initially generated by Kubernetes Engine - and changes after every request to modify or update - labels. You must always provide an up-to-date - fingerprint hash when updating or changing labels. Make - a ``get()`` request to the resource to get the latest - fingerprint. - - This corresponds to the ``label_fingerprint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([project_id, zone, cluster_id, resource_labels, label_fingerprint]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLabelsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if label_fingerprint is not None: - request.label_fingerprint = label_fingerprint - - if resource_labels: - request.resource_labels.update(resource_labels) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_labels, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_legacy_abac(self, - request: cluster_service.SetLegacyAbacRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - enabled: bool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables the ABAC authorization mechanism - on a cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetLegacyAbacRequest`): - The request object. SetLegacyAbacRequest enables or - disables the ABAC authorization mechanism for a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - enabled (:class:`bool`): - Required. Whether ABAC authorization - will be enabled in the cluster. - - This corresponds to the ``enabled`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
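A sketch of set_labels; since the docstring requires an up-to-date label_fingerprint, this example first reads the cluster via the client's get_cluster method (defined earlier in this file) before writing labels. The label key/value and resource names are placeholders.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import GetClusterRequest, SetLabelsRequest

    async def label_cluster():
        client = ClusterManagerAsyncClient()
        cluster_name = "projects/my-project/locations/us-central1-a/clusters/my-cluster"
        # Fetch the latest fingerprint first, as the docstring requires.
        cluster = await client.get_cluster(request=GetClusterRequest(name=cluster_name))
        request = SetLabelsRequest(
            name=cluster_name,
            resource_labels={"env": "staging"},
            label_fingerprint=cluster.label_fingerprint,
        )
        operation = await client.set_labels(request=request)
        return operation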
- - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, enabled]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetLegacyAbacRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if enabled is not None: - request.enabled = enabled - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_legacy_abac, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def start_ip_rotation(self, - request: cluster_service.StartIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Starts master IP rotation. - - Args: - request (:class:`google.container_v1beta1.types.StartIPRotationRequest`): - The request object. StartIPRotationRequest creates a new - IP for the cluster and then performs a node upgrade on - each node pool to point to the new IP. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.StartIPRotationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.start_ip_rotation, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def complete_ip_rotation(self, - request: cluster_service.CompleteIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Completes master IP rotation. - - Args: - request (:class:`google.container_v1beta1.types.CompleteIPRotationRequest`): - The request object. CompleteIPRotationRequest moves the - cluster master back into single-IP mode. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. 
- # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.CompleteIPRotationRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.complete_ip_rotation, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_node_pool_size(self, - request: cluster_service.SetNodePoolSizeRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the size for a specific node pool. - - Args: - request (:class:`google.container_v1beta1.types.SetNodePoolSizeRequest`): - The request object. SetNodePoolSizeRequest sets the size - of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - request = cluster_service.SetNodePoolSizeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_node_pool_size, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_network_policy(self, - request: cluster_service.SetNetworkPolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - network_policy: cluster_service.NetworkPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables Network Policy for a cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetNetworkPolicyRequest`): - The request object.
SetNetworkPolicyRequest - enables/disables network policy for a cluster. - project_id (:class:`str`): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - network_policy (:class:`google.container_v1beta1.types.NetworkPolicy`): - Required. Configuration options for - the NetworkPolicy feature. - - This corresponds to the ``network_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, network_policy]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetNetworkPolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if network_policy is not None: - request.network_policy = network_policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_network_policy, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
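A sketch of set_network_policy; the NetworkPolicy message exposes a provider enum (CALICO here) and an enabled flag, and the cluster path is a placeholder.

    from google.container_v1beta1 import ClusterManagerAsyncClient
    from google.container_v1beta1.types import NetworkPolicy, SetNetworkPolicyRequest

    async def enable_network_policy():
        client = ClusterManagerAsyncClient()
        request = SetNetworkPolicyRequest(
            name="projects/my-project/locations/us-central1-a/clusters/my-cluster",
            network_policy=NetworkPolicy(
                provider=NetworkPolicy.Provider.CALICO,
                enabled=True,
            ),
        )
        operation = await client.set_network_policy(request=request)
        return operation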
- return response - - async def set_maintenance_policy(self, - request: cluster_service.SetMaintenancePolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - maintenance_policy: cluster_service.MaintenancePolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the maintenance policy for a cluster. - - Args: - request (:class:`google.container_v1beta1.types.SetMaintenancePolicyRequest`): - The request object. SetMaintenancePolicyRequest sets the - maintenance policy for a cluster. - project_id (:class:`str`): - Required. The Google Developers Console `project ID or - project - number `__. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (:class:`str`): - Required. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. The name of the cluster to - update. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - maintenance_policy (:class:`google.container_v1beta1.types.MaintenancePolicy`): - Required. The maintenance policy to - be set for the cluster. An empty field - clears the existing maintenance policy. - - This corresponds to the ``maintenance_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.SetMaintenancePolicyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if maintenance_policy is not None: - request.maintenance_policy = maintenance_policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_maintenance_policy, - default_timeout=45.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_usable_subnetworks(self, - request: cluster_service.ListUsableSubnetworksRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListUsableSubnetworksAsyncPager: - r"""Lists subnetworks that can be used for creating - clusters in a project. - - Args: - request (:class:`google.container_v1beta1.types.ListUsableSubnetworksRequest`): - The request object. ListUsableSubnetworksRequest - requests the list of usable subnetworks. available to a - user for creating clusters. - parent (:class:`str`): - Required. The parent project where subnetworks are - usable. Specified in the format ``projects/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.services.cluster_manager.pagers.ListUsableSubnetworksAsyncPager: - ListUsableSubnetworksResponse is the - response of - ListUsableSubnetworksRequest. - - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListUsableSubnetworksRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_usable_subnetworks, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListUsableSubnetworksAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
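The async pager wrapped above resolves additional pages transparently during iteration; a short sketch with a placeholder parent:

```python
# Inside an async function, with `client` a ClusterManagerAsyncClient.
pager = await client.list_usable_subnetworks(parent="projects/my-project")
async for subnetwork in pager:  # further pages are fetched on demand
    print(subnetwork.subnetwork, subnetwork.ip_cidr_range)
```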
- return response - - async def list_locations(self, - request: cluster_service.ListLocationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListLocationsResponse: - r"""Fetches locations that offer Google Kubernetes - Engine. - - Args: - request (:class:`google.container_v1beta1.types.ListLocationsRequest`): - The request object. ListLocationsRequest is used to - request the locations that offer GKE. - parent (:class:`str`): - Required. Contains the name of the resource requested. - Specified in the format ``projects/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListLocationsResponse: - ListLocationsResponse returns the - list of all GKE locations and their - recommendation state. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = cluster_service.ListLocationsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_locations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-container", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterManagerAsyncClient", -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py deleted file mode 100644 index 2f6dfa92..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/client.py +++ /dev/null @@ -1,3750 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from collections import OrderedDict
-from distutils import util
-import os
-import re
-from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
-import pkg_resources
-import warnings
-
-from google.api_core import client_options as client_options_lib # type: ignore
-from google.api_core import exceptions as core_exceptions # type: ignore
-from google.api_core import gapic_v1 # type: ignore
-from google.api_core import retry as retries # type: ignore
-from google.auth import credentials as ga_credentials # type: ignore
-from google.auth.transport import mtls # type: ignore
-from google.auth.transport.grpc import SslCredentials # type: ignore
-from google.auth.exceptions import MutualTLSChannelError # type: ignore
-from google.oauth2 import service_account # type: ignore
-
-from google.container_v1beta1.services.cluster_manager import pagers
-from google.container_v1beta1.types import cluster_service
-from google.rpc import status_pb2 # type: ignore
-from .transports.base import ClusterManagerTransport, DEFAULT_CLIENT_INFO
-from .transports.grpc import ClusterManagerGrpcTransport
-from .transports.grpc_asyncio import ClusterManagerGrpcAsyncIOTransport
-
-
-class ClusterManagerClientMeta(type):
-    """Metaclass for the ClusterManager client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[ClusterManagerTransport]]
-    _transport_registry["grpc"] = ClusterManagerGrpcTransport
-    _transport_registry["grpc_asyncio"] = ClusterManagerGrpcAsyncIOTransport
-
-    def get_transport_class(cls,
-            label: str = None,
-        ) -> Type[ClusterManagerTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class ClusterManagerClient(metaclass=ClusterManagerClientMeta):
-    """Google Kubernetes Engine Cluster Manager v1beta1"""
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - DEFAULT_ENDPOINT = "container.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - ClusterManagerClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> ClusterManagerTransport: - """Returns the transport used by the client instance. - - Returns: - ClusterManagerTransport: The transport used by the client - instance. 
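A short sketch of the `from_service_account_*` factories defined above; the key-file path is a placeholder:

```python
from google.container_v1beta1 import ClusterManagerClient

# From a key file on disk...
client = ClusterManagerClient.from_service_account_file("service-account.json")

# ...or from an already-parsed dict; note that from_service_account_json is
# simply an alias for from_service_account_file.
# client = ClusterManagerClient.from_service_account_info(key_info_dict)
```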
- """ - return self._transport - - @staticmethod - def topic_path(project: str,topic: str,) -> str: - """Returns a fully-qualified topic string.""" - return "projects/{project}/topics/{topic}".format(project=project, topic=topic, ) - - @staticmethod - def parse_topic_path(path: str) -> Dict[str,str]: - """Parses a topic path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/topics/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ClusterManagerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the cluster manager client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ClusterManagerTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - - # Create SSL credentials for mutual TLS if needed. - use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))) - - client_cert_source_func = None - is_mtls = False - if use_client_cert: - if client_options.client_cert_source: - is_mtls = True - client_cert_source_func = client_options.client_cert_source - else: - is_mtls = mtls.has_default_client_cert_source() - if is_mtls: - client_cert_source_func = mtls.default_client_cert_source() - else: - client_cert_source_func = None - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - else: - use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_mtls_env == "never": - api_endpoint = self.DEFAULT_ENDPOINT - elif use_mtls_env == "always": - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - elif use_mtls_env == "auto": - if is_mtls: - api_endpoint = self.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = self.DEFAULT_ENDPOINT - else: - raise MutualTLSChannelError( - "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " - "values: never, auto, always" - ) - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - if isinstance(transport, ClusterManagerTransport): - # transport is a ClusterManagerTransport instance. - if credentials or client_options.credentials_file: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
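A sketch of the endpoint-resolution knobs described above, showing both the environment variable and the explicit ``api_endpoint`` (values are illustrative):

```python
import os

from google.api_core.client_options import ClientOptions
from google.container_v1beta1 import ClusterManagerClient

# "never" pins the regular endpoint even if a client certificate is present.
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"

# An explicit api_endpoint takes precedence over the environment variable.
client = ClusterManagerClient(
    client_options=ClientOptions(api_endpoint="container.googleapis.com"),
)
```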
- ) - self._transport = transport - else: - Transport = type(self).get_transport_class(transport) - self._transport = Transport( - credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=( - Transport == type(self).get_transport_class("grpc") - or Transport == type(self).get_transport_class("grpc_asyncio") - ), - ) - - def list_clusters(self, - request: cluster_service.ListClustersRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListClustersResponse: - r"""Lists all clusters owned by a project in either the - specified zone or all zones. - - Args: - request (google.container_v1beta1.types.ListClustersRequest): - The request object. ListClustersRequest lists clusters. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides, or "-" for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListClustersResponse: - ListClustersResponse is the result of - ListClustersRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListClustersRequest): - request = cluster_service.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_clusters] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. 
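A minimal synchronous sketch of `list_clusters` using the parent form that supersedes the deprecated project_id/zone pair ("-" spans all locations; the project name is a placeholder):

```python
from google.container_v1beta1 import ClusterManagerClient
from google.container_v1beta1.types import ListClustersRequest

client = ClusterManagerClient()
response = client.list_clusters(
    request=ListClustersRequest(parent="projects/my-project/locations/-"),
)
for cluster in response.clusters:
    print(cluster.name, cluster.status)
```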
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_cluster(self, - request: cluster_service.GetClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Cluster: - r"""Gets the details for a specific cluster. - - Args: - request (google.container_v1beta1.types.GetClusterRequest): - The request object. GetClusterRequest gets the settings - of a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to retrieve. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Cluster: - A Google Kubernetes Engine cluster. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetClusterRequest): - request = cluster_service.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
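A `get_cluster` sketch, again preferring the request's `name` field over the deprecated triple (placeholder names):

```python
from google.container_v1beta1.types import GetClusterRequest

cluster = client.get_cluster(
    request=GetClusterRequest(
        name="projects/my-project/locations/us-central1/clusters/my-cluster",
    ),
)
print(cluster.current_master_version)
```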
- return response - - def create_cluster(self, - request: cluster_service.CreateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster: cluster_service.Cluster = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Args: - request (google.container_v1beta1.types.CreateClusterRequest): - The request object. CreateClusterRequest creates a - cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.container_v1beta1.types.Cluster): - Required. A `cluster - resource `__ - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CreateClusterRequest): - request = cluster_service.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
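A hedged sketch of the create flow described above; the cluster spec is deliberately minimal and all names are placeholders:

```python
from google.container_v1beta1.types import Cluster, CreateClusterRequest

operation = client.create_cluster(
    request=CreateClusterRequest(
        parent="projects/my-project/locations/us-central1",
        cluster=Cluster(name="example-cluster", initial_node_count=3),
    ),
)
# Cluster creation is asynchronous; the returned Operation must be polled separately.
print(operation.name, operation.status)
```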
- rpc = self._transport._wrapped_methods[self._transport.create_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_cluster(self, - request: cluster_service.UpdateClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - update: cluster_service.ClusterUpdate = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the settings for a specific cluster. - - Args: - request (google.container_v1beta1.types.UpdateClusterRequest): - The request object. UpdateClusterRequest updates the - settings of a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update (google.container_v1beta1.types.ClusterUpdate): - Required. A description of the - update. - - This corresponds to the ``update`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, update]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.UpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.UpdateClusterRequest): - request = cluster_service.UpdateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
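For `update_cluster`, the `ClusterUpdate` message describes the delta to apply; a sketch with a placeholder version string:

```python
from google.container_v1beta1.types import ClusterUpdate, UpdateClusterRequest

operation = client.update_cluster(
    request=UpdateClusterRequest(
        name="projects/my-project/locations/us-central1/clusters/my-cluster",
        update=ClusterUpdate(desired_node_version="1.20"),
    ),
)
```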
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if update is not None: - request.update = update - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_node_pool(self, - request: cluster_service.UpdateNodePoolRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the version and/or image type of a specific - node pool. - - Args: - request (google.container_v1beta1.types.UpdateNodePoolRequest): - The request object. SetNodePoolVersionRequest updates - the version of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.UpdateNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.UpdateNodePoolRequest): - request = cluster_service.UpdateNodePoolRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_node_pool_autoscaling(self, - request: cluster_service.SetNodePoolAutoscalingRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the autoscaling settings of a specific node - pool. - - Args: - request (google.container_v1beta1.types.SetNodePoolAutoscalingRequest): - The request object. SetNodePoolAutoscalingRequest sets - the autoscaler settings of a node pool. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. 
All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetNodePoolAutoscalingRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetNodePoolAutoscalingRequest): - request = cluster_service.SetNodePoolAutoscalingRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_node_pool_autoscaling] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_logging_service(self, - request: cluster_service.SetLoggingServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - logging_service: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the logging service for a specific cluster. - - Args: - request (google.container_v1beta1.types.SetLoggingServiceRequest): - The request object. SetLoggingServiceRequest sets the - logging service of a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - logging_service (str): - Required. The logging service the cluster should use to - write logs. Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud - Logging service with a Kubernetes-native resource - model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``logging.googleapis.com`` for - earlier versions. - - This corresponds to the ``logging_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
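A sketch of selecting the Kubernetes-native logging backend from the options listed above, using the flattened form with placeholder identifiers:

```python
operation = client.set_logging_service(
    project_id="my-project",
    zone="us-central1-a",
    cluster_id="my-cluster",
    logging_service="logging.googleapis.com/kubernetes",
)
```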
- - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, logging_service]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLoggingServiceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLoggingServiceRequest): - request = cluster_service.SetLoggingServiceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if logging_service is not None: - request.logging_service = logging_service - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_logging_service] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_monitoring_service(self, - request: cluster_service.SetMonitoringServiceRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - monitoring_service: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the monitoring service for a specific cluster. - - Args: - request (google.container_v1beta1.types.SetMonitoringServiceRequest): - The request object. SetMonitoringServiceRequest sets the - monitoring service of a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - monitoring_service (str): - Required. The monitoring service the cluster should use - to write metrics. 
Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE - 1.15). - - ``none`` - No metrics will be exported from the - cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will - be used for GKE 1.14+ or ``monitoring.googleapis.com`` - for earlier versions. - - This corresponds to the ``monitoring_service`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, monitoring_service]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetMonitoringServiceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetMonitoringServiceRequest): - request = cluster_service.SetMonitoringServiceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if monitoring_service is not None: - request.monitoring_service = monitoring_service - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_monitoring_service] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_addons_config(self, - request: cluster_service.SetAddonsConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - addons_config: cluster_service.AddonsConfig = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the addons for a specific cluster. - - Args: - request (google.container_v1beta1.types.SetAddonsConfigRequest): - The request object. SetAddonsRequest sets the addons - associated with the cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. 
- - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - addons_config (google.container_v1beta1.types.AddonsConfig): - Required. The desired configurations - for the various addons available to run - in the cluster. - - This corresponds to the ``addons_config`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, addons_config]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetAddonsConfigRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetAddonsConfigRequest): - request = cluster_service.SetAddonsConfigRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if addons_config is not None: - request.addons_config = addons_config - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_addons_config] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_locations(self, - request: cluster_service.SetLocationsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - locations: Sequence[str] = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the locations for a specific cluster. Deprecated. 
Use - `projects.locations.clusters.update `__ - instead. - - Args: - request (google.container_v1beta1.types.SetLocationsRequest): - The request object. SetLocationsRequest sets the - locations of the cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - locations (Sequence[str]): - Required. The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. Changing - the locations a cluster is in will result in nodes being - either created or removed from the cluster, depending on - whether locations are being added or removed. - - This list must always include the cluster's primary - zone. - - This corresponds to the ``locations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - warnings.warn("ClusterManagerClient.set_locations is deprecated", - DeprecationWarning) - - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, locations]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLocationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLocationsRequest): - request = cluster_service.SetLocationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if locations is not None: - request.locations = locations - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_locations] - - # Certain fields should be provided within the metadata header; - # add these here. 
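Since `set_locations` emits a DeprecationWarning, the same change can be expressed through `update_cluster`; a sketch with placeholder zones:

```python
from google.container_v1beta1.types import ClusterUpdate, UpdateClusterRequest

operation = client.update_cluster(
    request=UpdateClusterRequest(
        name="projects/my-project/locations/us-central1/clusters/my-cluster",
        # Per the docstring above, the list must keep the cluster's primary zone.
        update=ClusterUpdate(desired_locations=["us-central1-a", "us-central1-b"]),
    ),
)
```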
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_master(self, - request: cluster_service.UpdateMasterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - master_version: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Updates the master for a specific cluster. - - Args: - request (google.container_v1beta1.types.UpdateMasterRequest): - The request object. UpdateMasterRequest updates the - master of the cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to upgrade. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - master_version (str): - Required. The Kubernetes version to - change the master to. - Users may specify either explicit - versions offered by Kubernetes Engine or - version aliases, which have the - following behavior: - - "latest": picks the highest valid - Kubernetes version - "1.X": picks the - highest valid patch+gke.N patch in the - 1.X version - "1.X.Y": picks the highest - valid gke.N patch in the 1.X.Y version - - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the - default Kubernetes version - - This corresponds to the ``master_version`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, master_version]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.UpdateMasterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
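The version aliases documented above keep the common `update_master` call terse; a sketch with placeholder identifiers:

```python
operation = client.update_master(
    project_id="my-project",
    zone="us-central1-a",
    cluster_id="my-cluster",
    master_version="-",  # "-" selects the default Kubernetes version; "latest" also works
)
```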
- if not isinstance(request, cluster_service.UpdateMasterRequest): - request = cluster_service.UpdateMasterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if master_version is not None: - request.master_version = master_version - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_master] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_master_auth(self, - request: cluster_service.SetMasterAuthRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets master auth materials. Currently supports - changing the admin password of a specific cluster, - either via password generation or explicitly setting the - password. - - Args: - request (google.container_v1beta1.types.SetMasterAuthRequest): - The request object. SetMasterAuthRequest updates the - admin password of a cluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetMasterAuthRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetMasterAuthRequest): - request = cluster_service.SetMasterAuthRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_master_auth] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_cluster(self, - request: cluster_service.DeleteClusterRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted.
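Under the same assumptions (placeholder names, default credentials), a sketch of set_master_auth as defined above; whether the ``update`` field may be left empty when the password is generated is an assumption here, not something this patch states.

    from google.container_v1beta1 import ClusterManagerClient
    from google.container_v1beta1.types import SetMasterAuthRequest

    client = ClusterManagerClient()
    operation = client.set_master_auth(request=SetMasterAuthRequest(
        name="projects/my-proj/locations/us-central1-a/clusters/my-cluster",
        action=SetMasterAuthRequest.Action.GENERATE_PASSWORD,
        update={},  # assumed: empty MasterAuth when the password is generated
    ))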
- - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Args: - request (google.container_v1beta1.types.DeleteClusterRequest): - The request object. DeleteClusterRequest deletes a - cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to delete. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.DeleteClusterRequest): - request = cluster_service.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
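A sketch of delete_cluster, same assumptions; note the caveat above that resources such as load balancers added after cluster creation are left in place.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    operation = client.delete_cluster(request={
        "name": "projects/my-proj/locations/us-central1-a/clusters/my-cluster",  # placeholder
    })
    # Poll the returned Operation (e.g. via get_operation) until it completes.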
- return response - - def list_operations(self, - request: cluster_service.ListOperationsRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListOperationsResponse: - r"""Lists all operations in a project in the specified - zone or all zones. - - Args: - request (google.container_v1beta1.types.ListOperationsRequest): - The request object. ListOperationsRequest lists - operations. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - to return operations for, or ``-`` for all zones. This - field has been deprecated and replaced by the parent - field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListOperationsResponse: - ListOperationsResponse is the result - of ListOperationsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListOperationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListOperationsRequest): - request = cluster_service.ListOperationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_operations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_operation(self, - request: cluster_service.GetOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Gets the specified operation. - - Args: - request (google.container_v1beta1.types.GetOperationRequest): - The request object. 
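A sketch of list_operations, same assumptions; per the docstring, ``-`` asks for operations across all zones.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    response = client.list_operations(request={
        "parent": "projects/my-proj/locations/-",  # placeholder project
    })
    for operation in response.operations:
        print(operation.name, operation.status)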
GetOperationRequest gets a single - operation. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (str): - Required. Deprecated. The server-assigned ``name`` of - the operation. This field has been deprecated and - replaced by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetOperationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetOperationRequest): - request = cluster_service.GetOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_operation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def cancel_operation(self, - request: cluster_service.CancelOperationRequest = None, - *, - project_id: str = None, - zone: str = None, - operation_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Cancels the specified operation. - - Args: - request (google.container_v1beta1.types.CancelOperationRequest): - The request object. CancelOperationRequest cancels a - single operation. - project_id (str): - Required. Deprecated. 
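A sketch of get_operation, same assumptions.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    operation = client.get_operation(request={
        # Placeholder server-assigned operation name.
        "name": "projects/my-proj/locations/us-central1-a/operations/operation-123",
    })
    print(operation.status)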
The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the operation resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - operation_id (str): - Required. Deprecated. The server-assigned ``name`` of - the operation. This field has been deprecated and - replaced by the name field. - - This corresponds to the ``operation_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, operation_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CancelOperationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CancelOperationRequest): - request = cluster_service.CancelOperationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if operation_id is not None: - request.operation_id = operation_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.cancel_operation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def get_server_config(self, - request: cluster_service.GetServerConfigRequest = None, - *, - project_id: str = None, - zone: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ServerConfig: - r"""Returns configuration info about the Google - Kubernetes Engine service. - - Args: - request (google.container_v1beta1.types.GetServerConfigRequest): - The request object. Gets the current Kubernetes Engine - service configuration. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. 
The name of the Google Compute - Engine - `zone `__ - to return operations for. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ServerConfig: - Kubernetes Engine service - configuration. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetServerConfigRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetServerConfigRequest): - request = cluster_service.GetServerConfigRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_server_config] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_node_pools(self, - request: cluster_service.ListNodePoolsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListNodePoolsResponse: - r"""Lists the node pools for a cluster. - - Args: - request (google.container_v1beta1.types.ListNodePoolsRequest): - The request object. ListNodePoolsRequest lists the node - pool(s) for a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
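A sketch of get_server_config, same assumptions.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    config = client.get_server_config(request={
        "name": "projects/my-proj/locations/us-central1-a",  # placeholder
    })
    print(config.default_cluster_version)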
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListNodePoolsResponse: - ListNodePoolsResponse is the result - of ListNodePoolsRequest. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListNodePoolsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListNodePoolsRequest): - request = cluster_service.ListNodePoolsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_node_pools] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_json_web_keys(self, - request: cluster_service.GetJSONWebKeysRequest = None, - *, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.GetJSONWebKeysResponse: - r"""Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Args: - request (google.container_v1beta1.types.GetJSONWebKeysRequest): - The request object. GetJSONWebKeysRequest gets the - public component of the keys used by the cluster to sign - token requests. This will be the jwks_uri for the - discovery document returned by getOpenIDConfig. See the - OpenID Connect Discovery 1.0 specification for details. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.GetJSONWebKeysResponse: - GetJSONWebKeysResponse is a valid - JSON Web Key Set as specified in RFC - 7517 - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetJSONWebKeysRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields.
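A sketch of list_node_pools, same assumptions.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    response = client.list_node_pools(request={
        "parent": "projects/my-proj/locations/us-central1-a/clusters/my-cluster",  # placeholder
    })
    for node_pool in response.node_pools:
        print(node_pool.name, node_pool.status)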
- if not isinstance(request, cluster_service.GetJSONWebKeysRequest): - request = cluster_service.GetJSONWebKeysRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_json_web_keys] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_node_pool(self, - request: cluster_service.GetNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.NodePool: - r"""Retrieves the requested node pool. - - Args: - request (google.container_v1beta1.types.GetNodePoolRequest): - The request object. GetNodePoolRequest retrieves a node - pool for a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Required. Deprecated. The name of the - node pool. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.NodePool: - NodePool contains the name and - configuration for a cluster's node pool. - Node pools are a set of nodes (i.e. - VM's), with a common configuration and - specification, under the control of the - cluster master. They may have a set of - Kubernetes labels applied to them, which - may be used to reference them during pod - scheduling. They may also be resized up - or down, to accommodate the workload. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.GetNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.GetNodePoolRequest): - request = cluster_service.GetNodePoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_node_pool(self, - request: cluster_service.CreateNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool: cluster_service.NodePool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Creates a node pool for a cluster. - - Args: - request (google.container_v1beta1.types.CreateNodePoolRequest): - The request object. CreateNodePoolRequest creates a node - pool for a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the - parent field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the parent field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the parent field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool (google.container_v1beta1.types.NodePool): - Required. The node pool to create. - This corresponds to the ``node_pool`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.CreateNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.CreateNodePoolRequest): - request = cluster_service.CreateNodePoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool is not None: - request.node_pool = node_pool - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_node_pool(self, - request: cluster_service.DeleteNodePoolRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Deletes a node pool from a cluster. - - Args: - request (google.container_v1beta1.types.DeleteNodePoolRequest): - The request object. DeleteNodePoolRequest deletes a node - pool for a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Required. Deprecated. The name of the - node pool to delete. This field has been - deprecated and replaced by the name - field. 
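A sketch of create_node_pool, same assumptions; the node pool spec is a minimal placeholder.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    operation = client.create_node_pool(request={
        "parent": "projects/my-proj/locations/us-central1-a/clusters/my-cluster",
        "node_pool": {"name": "pool-1", "initial_node_count": 3},  # placeholder spec
    })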
- - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.DeleteNodePoolRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.DeleteNodePoolRequest): - request = cluster_service.DeleteNodePoolRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_node_pool] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def rollback_node_pool_upgrade(self, - request: cluster_service.RollbackNodePoolUpgradeRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Args: - request (google.container_v1beta1.types.RollbackNodePoolUpgradeRequest): - The request object. RollbackNodePoolUpgradeRequest - rolls back the previously Aborted or Failed NodePool - upgrade. This will be a no-op if the last upgrade - successfully completed. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field.
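A sketch of delete_node_pool, same assumptions.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    operation = client.delete_node_pool(request={
        # Placeholder fully-qualified node pool name.
        "name": "projects/my-proj/locations/us-central1-a/clusters/my-cluster/nodePools/pool-1",
    })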
- - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to rollback. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Required. Deprecated. The name of the - node pool to rollback. This field has - been deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.RollbackNodePoolUpgradeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.RollbackNodePoolUpgradeRequest): - request = cluster_service.RollbackNodePoolUpgradeRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.rollback_node_pool_upgrade] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_node_pool_management(self, - request: cluster_service.SetNodePoolManagementRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - node_pool_id: str = None, - management: cluster_service.NodeManagement = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the NodeManagement options for a node pool. - - Args: - request (google.container_v1beta1.types.SetNodePoolManagementRequest): - The request object. SetNodePoolManagementRequest sets - the node management properties of a node pool. - project_id (str): - Required. Deprecated. 
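A sketch of rollback_node_pool_upgrade, same assumptions; as documented above, it is a no-op if the last upgrade succeeded.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    operation = client.rollback_node_pool_upgrade(request={
        # Placeholder node pool name.
        "name": "projects/my-proj/locations/us-central1-a/clusters/my-cluster/nodePools/pool-1",
    })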
The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - node_pool_id (str): - Required. Deprecated. The name of the - node pool to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``node_pool_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - management (google.container_v1beta1.types.NodeManagement): - Required. NodeManagement - configuration for the node pool. - - This corresponds to the ``management`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, node_pool_id, management]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetNodePoolManagementRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetNodePoolManagementRequest): - request = cluster_service.SetNodePoolManagementRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if node_pool_id is not None: - request.node_pool_id = node_pool_id - if management is not None: - request.management = management - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_node_pool_management] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_labels(self, - request: cluster_service.SetLabelsRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - resource_labels: Sequence[cluster_service.SetLabelsRequest.ResourceLabelsEntry] = None, - label_fingerprint: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets labels on a cluster. - - Args: - request (google.container_v1beta1.types.SetLabelsRequest): - The request object. SetLabelsRequest sets the Google - Cloud Platform labels on a Google Container Engine - cluster, which will in turn set them for Google Compute - Engine resources used by that cluster - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - resource_labels (Sequence[google.container_v1beta1.types.SetLabelsRequest.ResourceLabelsEntry]): - Required. The labels to set for that - cluster. - - This corresponds to the ``resource_labels`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - label_fingerprint (str): - Required. The fingerprint of the previous set of labels - for this resource, used to detect conflicts. The - fingerprint is initially generated by Kubernetes Engine - and changes after every request to modify or update - labels. You must always provide an up-to-date - fingerprint hash when updating or changing labels. Make - a ``get()`` request to the resource to get the latest - fingerprint. - - This corresponds to the ``label_fingerprint`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([project_id, zone, cluster_id, resource_labels, label_fingerprint]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLabelsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLabelsRequest): - request = cluster_service.SetLabelsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if resource_labels is not None: - request.resource_labels = resource_labels - if label_fingerprint is not None: - request.label_fingerprint = label_fingerprint - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_labels] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_legacy_abac(self, - request: cluster_service.SetLegacyAbacRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - enabled: bool = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables the ABAC authorization mechanism - on a cluster. - - Args: - request (google.container_v1beta1.types.SetLegacyAbacRequest): - The request object. SetLegacyAbacRequest enables or - disables the ABAC authorization mechanism for a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster to update. This field has been - deprecated and replaced by the name - field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - enabled (bool): - Required. Whether ABAC authorization - will be enabled in the cluster. - - This corresponds to the ``enabled`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
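A sketch of the read-modify-write cycle the set_labels docstring calls for: fetch the current label_fingerprint, then update. Same assumptions as the earlier sketches.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    cluster_name = "projects/my-proj/locations/us-central1-a/clusters/my-cluster"  # placeholder
    cluster = client.get_cluster(request={"name": cluster_name})
    operation = client.set_labels(request={
        "name": cluster_name,
        "resource_labels": {"env": "staging"},
        "label_fingerprint": cluster.label_fingerprint,  # detects concurrent edits
    })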
- - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, enabled]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetLegacyAbacRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetLegacyAbacRequest): - request = cluster_service.SetLegacyAbacRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if enabled is not None: - request.enabled = enabled - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_legacy_abac] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def start_ip_rotation(self, - request: cluster_service.StartIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Starts master IP rotation. - - Args: - request (google.container_v1beta1.types.StartIPRotationRequest): - The request object. StartIPRotationRequest creates a new - IP for the cluster and then performs a node upgrade on - each node pool to point to the new IP. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.StartIPRotationRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.StartIPRotationRequest): - request = cluster_service.StartIPRotationRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.start_ip_rotation] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def complete_ip_rotation(self, - request: cluster_service.CompleteIPRotationRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Completes master IP rotation. - - Args: - request (google.container_v1beta1.types.CompleteIPRotationRequest): - The request object. CompleteIPRotationRequest moves the - cluster master back into single-IP mode. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
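A sketch of start_ip_rotation, same assumptions; complete_ip_rotation (below) later moves the master back into single-IP mode.

    from google.container_v1beta1 import ClusterManagerClient

    client = ClusterManagerClient()
    operation = client.start_ip_rotation(request={
        "name": "projects/my-proj/locations/us-central1-a/clusters/my-cluster",  # placeholder
    })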
-
-        Returns:
-            google.container_v1beta1.types.Operation:
-                This operation resource represents
-                operations that may have happened or are
-                happening on the cluster. All fields are
-                output only.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([project_id, zone, cluster_id])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a cluster_service.CompleteIPRotationRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, cluster_service.CompleteIPRotationRequest):
-            request = cluster_service.CompleteIPRotationRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if project_id is not None:
-                request.project_id = project_id
-            if zone is not None:
-                request.zone = zone
-            if cluster_id is not None:
-                request.cluster_id = cluster_id
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.complete_ip_rotation]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def set_node_pool_size(self,
-            request: cluster_service.SetNodePoolSizeRequest = None,
-            *,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> cluster_service.Operation:
-        r"""Sets the size for a specific node pool.
-
-        Args:
-            request (google.container_v1beta1.types.SetNodePoolSizeRequest):
-                The request object. SetNodePoolSizeRequest sets the size
-                of a node pool.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.container_v1beta1.types.Operation:
-                This operation resource represents
-                operations that may have happened or are
-                happening on the cluster. All fields are
-                output only.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Minor optimization to avoid making a copy if the user passes
-        # in a cluster_service.SetNodePoolSizeRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, cluster_service.SetNodePoolSizeRequest):
-            request = cluster_service.SetNodePoolSizeRequest(request)
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.set_node_pool_size]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_network_policy(self, - request: cluster_service.SetNetworkPolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - network_policy: cluster_service.NetworkPolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Enables or disables Network Policy for a cluster. - - Args: - request (google.container_v1beta1.types.SetNetworkPolicyRequest): - The request object. SetNetworkPolicyRequest - enables/disables network policy for a cluster. - project_id (str): - Required. Deprecated. The Google Developers Console - `project ID or project - number `__. - This field has been deprecated and replaced by the name - field. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. Deprecated. The name of the Google Compute - Engine - `zone `__ - in which the cluster resides. This field has been - deprecated and replaced by the name field. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated - and replaced by the name field. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - network_policy (google.container_v1beta1.types.NetworkPolicy): - Required. Configuration options for - the NetworkPolicy feature. - - This corresponds to the ``network_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, network_policy]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetNetworkPolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.SetNetworkPolicyRequest): - request = cluster_service.SetNetworkPolicyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if project_id is not None: - request.project_id = project_id - if zone is not None: - request.zone = zone - if cluster_id is not None: - request.cluster_id = cluster_id - if network_policy is not None: - request.network_policy = network_policy - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_network_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_maintenance_policy(self, - request: cluster_service.SetMaintenancePolicyRequest = None, - *, - project_id: str = None, - zone: str = None, - cluster_id: str = None, - maintenance_policy: cluster_service.MaintenancePolicy = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.Operation: - r"""Sets the maintenance policy for a cluster. - - Args: - request (google.container_v1beta1.types.SetMaintenancePolicyRequest): - The request object. SetMaintenancePolicyRequest sets the - maintenance policy for a cluster. - project_id (str): - Required. The Google Developers Console `project ID or - project - number `__. - - This corresponds to the ``project_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - zone (str): - Required. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. - - This corresponds to the ``zone`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. The name of the cluster to - update. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - maintenance_policy (google.container_v1beta1.types.MaintenancePolicy): - Required. The maintenance policy to - be set for the cluster. An empty field - clears the existing maintenance policy. - - This corresponds to the ``maintenance_policy`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.Operation: - This operation resource represents - operations that may have happened or are - happening on the cluster. All fields are - output only. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([project_id, zone, cluster_id, maintenance_policy]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.SetMaintenancePolicyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
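-        # (Illustration only, not part of the generated flow: the coercion
-        # below also accepts a plain dict in place of the proto, e.g. a
-        # hypothetical
-        #
-        #     client.set_maintenance_policy(request={
-        #         "name": "projects/my-project/locations/us-central1/clusters/my-cluster",
-        #         "maintenance_policy": {},
-        #     })
-        #
-        # which the SetMaintenancePolicyRequest constructor converts into a
-        # proto message.)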
-        if not isinstance(request, cluster_service.SetMaintenancePolicyRequest):
-            request = cluster_service.SetMaintenancePolicyRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if project_id is not None:
-                request.project_id = project_id
-            if zone is not None:
-                request.zone = zone
-            if cluster_id is not None:
-                request.cluster_id = cluster_id
-            if maintenance_policy is not None:
-                request.maintenance_policy = maintenance_policy
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
-        rpc = self._transport._wrapped_methods[self._transport.set_maintenance_policy]
-
-        # Certain fields should be provided within the metadata header;
-        # add these here.
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ("name", request.name),
-            )),
-        )
-
-        # Send the request.
-        response = rpc(
-            request,
-            retry=retry,
-            timeout=timeout,
-            metadata=metadata,
-        )
-
-        # Done; return the response.
-        return response
-
-    def list_usable_subnetworks(self,
-            request: cluster_service.ListUsableSubnetworksRequest = None,
-            *,
-            parent: str = None,
-            retry: retries.Retry = gapic_v1.method.DEFAULT,
-            timeout: float = None,
-            metadata: Sequence[Tuple[str, str]] = (),
-            ) -> pagers.ListUsableSubnetworksPager:
-        r"""Lists subnetworks that can be used for creating
-        clusters in a project.
-
-        Args:
-            request (google.container_v1beta1.types.ListUsableSubnetworksRequest):
-                The request object. ListUsableSubnetworksRequest
-                requests the list of usable subnetworks available to a
-                user for creating clusters.
-            parent (str):
-                Required. The parent project where subnetworks are
-                usable. Specified in the format ``projects/*``.
-
-                This corresponds to the ``parent`` field
-                on the ``request`` instance; if ``request`` is provided, this
-                should not be set.
-            retry (google.api_core.retry.Retry): Designation of what errors, if any,
-                should be retried.
-            timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
-
-        Returns:
-            google.container_v1beta1.services.cluster_manager.pagers.ListUsableSubnetworksPager:
-                ListUsableSubnetworksResponse is the
-                response to ListUsableSubnetworksRequest.
-
-                Iterating over this object will yield
-                results and resolve additional pages
-                automatically.
-
-        """
-        # Create or coerce a protobuf request object.
-        # Sanity check: If we got a request object, we should *not* have
-        # gotten any keyword arguments that map to the request.
-        has_flattened_params = any([parent])
-        if request is not None and has_flattened_params:
-            raise ValueError('If the `request` argument is set, then none of '
-                             'the individual field arguments should be set.')
-
-        # Minor optimization to avoid making a copy if the user passes
-        # in a cluster_service.ListUsableSubnetworksRequest.
-        # There's no risk of modifying the input as we've already verified
-        # there are no flattened fields.
-        if not isinstance(request, cluster_service.ListUsableSubnetworksRequest):
-            request = cluster_service.ListUsableSubnetworksRequest(request)
-            # If we have keyword arguments corresponding to fields on the
-            # request, apply these.
-            if parent is not None:
-                request.parent = parent
-
-        # Wrap the RPC method; this adds retry and timeout information,
-        # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.list_usable_subnetworks] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListUsableSubnetworksPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_locations(self, - request: cluster_service.ListLocationsRequest = None, - *, - parent: str = None, - retry: retries.Retry = gapic_v1.method.DEFAULT, - timeout: float = None, - metadata: Sequence[Tuple[str, str]] = (), - ) -> cluster_service.ListLocationsResponse: - r"""Fetches locations that offer Google Kubernetes - Engine. - - Args: - request (google.container_v1beta1.types.ListLocationsRequest): - The request object. ListLocationsRequest is used to - request the locations that offer GKE. - parent (str): - Required. Contains the name of the resource requested. - Specified in the format ``projects/*``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.container_v1beta1.types.ListLocationsResponse: - ListLocationsResponse returns the - list of all GKE locations and their - recommendation state. - - """ - # Create or coerce a protobuf request object. - # Sanity check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a cluster_service.ListLocationsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, cluster_service.ListLocationsRequest): - request = cluster_service.ListLocationsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_locations] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - - - - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-container", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - - -__all__ = ( - "ClusterManagerClient", -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py deleted file mode 100644 index 59e94e70..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/pagers.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional - -from google.container_v1beta1.types import cluster_service - - -class ListUsableSubnetworksPager: - """A pager for iterating through ``list_usable_subnetworks`` requests. - - This class thinly wraps an initial - :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` object, and - provides an ``__iter__`` method to iterate through its - ``subnetworks`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListUsableSubnetworks`` requests and continue to iterate - through the ``subnetworks`` field on the - corresponding responses. - - All the usual :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., cluster_service.ListUsableSubnetworksResponse], - request: cluster_service.ListUsableSubnetworksRequest, - response: cluster_service.ListUsableSubnetworksResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.container_v1beta1.types.ListUsableSubnetworksRequest): - The initial request object. - response (google.container_v1beta1.types.ListUsableSubnetworksResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
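-
-        Example:
-            A sketch of typical usage; the pager is normally obtained from
-            ``ClusterManagerClient.list_usable_subnetworks`` rather than
-            constructed directly, and the parent below is a placeholder::
-
-                from google.container_v1beta1 import ClusterManagerClient
-
-                client = ClusterManagerClient()
-                for subnetwork in client.list_usable_subnetworks(parent="projects/my-project"):
-                    print(subnetwork.subnetwork)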
- """ - self._method = method - self._request = cluster_service.ListUsableSubnetworksRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterable[cluster_service.ListUsableSubnetworksResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterable[cluster_service.UsableSubnetwork]: - for page in self.pages: - yield from page.subnetworks - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListUsableSubnetworksAsyncPager: - """A pager for iterating through ``list_usable_subnetworks`` requests. - - This class thinly wraps an initial - :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``subnetworks`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListUsableSubnetworks`` requests and continue to iterate - through the ``subnetworks`` field on the - corresponding responses. - - All the usual :class:`google.container_v1beta1.types.ListUsableSubnetworksResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[cluster_service.ListUsableSubnetworksResponse]], - request: cluster_service.ListUsableSubnetworksRequest, - response: cluster_service.ListUsableSubnetworksResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.container_v1beta1.types.ListUsableSubnetworksRequest): - The initial request object. - response (google.container_v1beta1.types.ListUsableSubnetworksResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = cluster_service.ListUsableSubnetworksRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterable[cluster_service.ListUsableSubnetworksResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - - def __aiter__(self) -> AsyncIterable[cluster_service.UsableSubnetwork]: - async def async_generator(): - async for page in self.pages: - for response in page.subnetworks: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py deleted file mode 100644 index 32ea8716..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import ClusterManagerTransport -from .grpc import ClusterManagerGrpcTransport -from .grpc_asyncio import ClusterManagerGrpcAsyncIOTransport - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[ClusterManagerTransport]] -_transport_registry['grpc'] = ClusterManagerGrpcTransport -_transport_registry['grpc_asyncio'] = ClusterManagerGrpcAsyncIOTransport - -__all__ = ( - 'ClusterManagerTransport', - 'ClusterManagerGrpcTransport', - 'ClusterManagerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py deleted file mode 100644 index bb24b465..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/base.py +++ /dev/null @@ -1,694 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import packaging.version -import pkg_resources - -import google.auth # type: ignore -import google.api_core # type: ignore -from google.api_core import exceptions as core_exceptions # type: ignore -from google.api_core import gapic_v1 # type: ignore -from google.api_core import retry as retries # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.container_v1beta1.types import cluster_service -from google.protobuf import empty_pb2 # type: ignore - -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - 'google-container', - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() - -try: - # google.auth.__version__ was added in 1.26.0 - _GOOGLE_AUTH_VERSION = google.auth.__version__ -except AttributeError: - try: # try pkg_resources if it is available - _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version - except pkg_resources.DistributionNotFound: # pragma: NO COVER - _GOOGLE_AUTH_VERSION = None - - -class ClusterManagerTransport(abc.ABC): - """Abstract transport class for ClusterManager.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/cloud-platform', - ) - - DEFAULT_HOST: str = 'container.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - - # If the credentials is service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # TODO(busunkim): This method is in the base transport - # to avoid duplicating code across the transport classes. These functions - # should be deleted once the minimum required versions of google-auth is increased. - - # TODO: Remove this function once google-auth >= 1.25.0 is required - @classmethod - def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]: - """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" - - scopes_kwargs = {} - - if _GOOGLE_AUTH_VERSION and ( - packaging.version.parse(_GOOGLE_AUTH_VERSION) - >= packaging.version.parse("1.25.0") - ): - scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} - else: - scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} - - return scopes_kwargs - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.list_clusters: gapic_v1.method.wrap_method( - self.list_clusters, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.get_cluster: gapic_v1.method.wrap_method( - self.get_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.create_cluster: gapic_v1.method.wrap_method( - self.create_cluster, - default_timeout=45.0, - client_info=client_info, - ), - self.update_cluster: gapic_v1.method.wrap_method( - self.update_cluster, - default_timeout=45.0, - client_info=client_info, - ), - self.update_node_pool: gapic_v1.method.wrap_method( - self.update_node_pool, - default_timeout=45.0, - client_info=client_info, - ), - self.set_node_pool_autoscaling: gapic_v1.method.wrap_method( - self.set_node_pool_autoscaling, - default_timeout=45.0, - client_info=client_info, - ), - self.set_logging_service: gapic_v1.method.wrap_method( - self.set_logging_service, - default_timeout=45.0, - client_info=client_info, - ), - self.set_monitoring_service: gapic_v1.method.wrap_method( - self.set_monitoring_service, - default_timeout=45.0, - client_info=client_info, - ), - self.set_addons_config: gapic_v1.method.wrap_method( - self.set_addons_config, - default_timeout=45.0, - client_info=client_info, - ), - self.set_locations: gapic_v1.method.wrap_method( - self.set_locations, - default_timeout=45.0, - client_info=client_info, - ), - 
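-            # (Explanatory note, legal inside the dict literal: for the
-            # retried methods above, retries.Retry(initial=0.1, maximum=60.0,
-            # multiplier=1.3) sleeps roughly 0.1s, growing by a factor of 1.3
-            # per attempt with jitter and capped at 60s, and gives up once
-            # the 20s deadline is exhausted; only DeadlineExceeded and
-            # ServiceUnavailable errors are retried. Methods wrapped without
-            # default_retry, such as create_cluster, are not retried by
-            # default.)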
self.update_master: gapic_v1.method.wrap_method( - self.update_master, - default_timeout=45.0, - client_info=client_info, - ), - self.set_master_auth: gapic_v1.method.wrap_method( - self.set_master_auth, - default_timeout=45.0, - client_info=client_info, - ), - self.delete_cluster: gapic_v1.method.wrap_method( - self.delete_cluster, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.list_operations: gapic_v1.method.wrap_method( - self.list_operations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.get_operation: gapic_v1.method.wrap_method( - self.get_operation, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.cancel_operation: gapic_v1.method.wrap_method( - self.cancel_operation, - default_timeout=45.0, - client_info=client_info, - ), - self.get_server_config: gapic_v1.method.wrap_method( - self.get_server_config, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.list_node_pools: gapic_v1.method.wrap_method( - self.list_node_pools, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.get_json_web_keys: gapic_v1.method.wrap_method( - self.get_json_web_keys, - default_timeout=None, - client_info=client_info, - ), - self.get_node_pool: gapic_v1.method.wrap_method( - self.get_node_pool, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.create_node_pool: gapic_v1.method.wrap_method( - self.create_node_pool, - default_timeout=45.0, - client_info=client_info, - ), - self.delete_node_pool: gapic_v1.method.wrap_method( - self.delete_node_pool, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.rollback_node_pool_upgrade: gapic_v1.method.wrap_method( - self.rollback_node_pool_upgrade, - default_timeout=45.0, - client_info=client_info, - ), - self.set_node_pool_management: gapic_v1.method.wrap_method( - self.set_node_pool_management, - default_timeout=45.0, - client_info=client_info, - ), - self.set_labels: gapic_v1.method.wrap_method( - self.set_labels, - default_timeout=45.0, - client_info=client_info, - ), - self.set_legacy_abac: gapic_v1.method.wrap_method( - 
self.set_legacy_abac, - default_timeout=45.0, - client_info=client_info, - ), - self.start_ip_rotation: gapic_v1.method.wrap_method( - self.start_ip_rotation, - default_timeout=45.0, - client_info=client_info, - ), - self.complete_ip_rotation: gapic_v1.method.wrap_method( - self.complete_ip_rotation, - default_timeout=45.0, - client_info=client_info, - ), - self.set_node_pool_size: gapic_v1.method.wrap_method( - self.set_node_pool_size, - default_timeout=45.0, - client_info=client_info, - ), - self.set_network_policy: gapic_v1.method.wrap_method( - self.set_network_policy, - default_timeout=45.0, - client_info=client_info, - ), - self.set_maintenance_policy: gapic_v1.method.wrap_method( - self.set_maintenance_policy, - default_timeout=45.0, - client_info=client_info, - ), - self.list_usable_subnetworks: gapic_v1.method.wrap_method( - self.list_usable_subnetworks, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - self.list_locations: gapic_v1.method.wrap_method( - self.list_locations, - default_retry=retries.Retry( -initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=20.0, - ), - default_timeout=20.0, - client_info=client_info, - ), - } - - @property - def list_clusters(self) -> Callable[ - [cluster_service.ListClustersRequest], - Union[ - cluster_service.ListClustersResponse, - Awaitable[cluster_service.ListClustersResponse] - ]]: - raise NotImplementedError() - - @property - def get_cluster(self) -> Callable[ - [cluster_service.GetClusterRequest], - Union[ - cluster_service.Cluster, - Awaitable[cluster_service.Cluster] - ]]: - raise NotImplementedError() - - @property - def create_cluster(self) -> Callable[ - [cluster_service.CreateClusterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def update_cluster(self) -> Callable[ - [cluster_service.UpdateClusterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def update_node_pool(self) -> Callable[ - [cluster_service.UpdateNodePoolRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_node_pool_autoscaling(self) -> Callable[ - [cluster_service.SetNodePoolAutoscalingRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_logging_service(self) -> Callable[ - [cluster_service.SetLoggingServiceRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_monitoring_service(self) -> Callable[ - [cluster_service.SetMonitoringServiceRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_addons_config(self) -> Callable[ - [cluster_service.SetAddonsConfigRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_locations(self) -> Callable[ - [cluster_service.SetLocationsRequest], - Union[ - 
cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def update_master(self) -> Callable[ - [cluster_service.UpdateMasterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_master_auth(self) -> Callable[ - [cluster_service.SetMasterAuthRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_cluster(self) -> Callable[ - [cluster_service.DeleteClusterRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def list_operations(self) -> Callable[ - [cluster_service.ListOperationsRequest], - Union[ - cluster_service.ListOperationsResponse, - Awaitable[cluster_service.ListOperationsResponse] - ]]: - raise NotImplementedError() - - @property - def get_operation(self) -> Callable[ - [cluster_service.GetOperationRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def cancel_operation(self) -> Callable[ - [cluster_service.CancelOperationRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def get_server_config(self) -> Callable[ - [cluster_service.GetServerConfigRequest], - Union[ - cluster_service.ServerConfig, - Awaitable[cluster_service.ServerConfig] - ]]: - raise NotImplementedError() - - @property - def list_node_pools(self) -> Callable[ - [cluster_service.ListNodePoolsRequest], - Union[ - cluster_service.ListNodePoolsResponse, - Awaitable[cluster_service.ListNodePoolsResponse] - ]]: - raise NotImplementedError() - - @property - def get_json_web_keys(self) -> Callable[ - [cluster_service.GetJSONWebKeysRequest], - Union[ - cluster_service.GetJSONWebKeysResponse, - Awaitable[cluster_service.GetJSONWebKeysResponse] - ]]: - raise NotImplementedError() - - @property - def get_node_pool(self) -> Callable[ - [cluster_service.GetNodePoolRequest], - Union[ - cluster_service.NodePool, - Awaitable[cluster_service.NodePool] - ]]: - raise NotImplementedError() - - @property - def create_node_pool(self) -> Callable[ - [cluster_service.CreateNodePoolRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_node_pool(self) -> Callable[ - [cluster_service.DeleteNodePoolRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def rollback_node_pool_upgrade(self) -> Callable[ - [cluster_service.RollbackNodePoolUpgradeRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_node_pool_management(self) -> Callable[ - [cluster_service.SetNodePoolManagementRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_labels(self) -> Callable[ - [cluster_service.SetLabelsRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_legacy_abac(self) -> Callable[ - [cluster_service.SetLegacyAbacRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() 
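-
-    # (Illustration, not part of the original module: a concrete transport
-    # satisfies one of these abstract properties by returning a callable,
-    # e.g. a hypothetical in-process fake for tests:
-    #
-    #     class FakeClusterManagerTransport(ClusterManagerTransport):
-    #         @property
-    #         def set_legacy_abac(self):
-    #             return lambda request, **kwargs: cluster_service.Operation()
-    #
-    # The real gRPC transports that follow return memoized channel stubs
-    # instead.)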
- - @property - def start_ip_rotation(self) -> Callable[ - [cluster_service.StartIPRotationRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def complete_ip_rotation(self) -> Callable[ - [cluster_service.CompleteIPRotationRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_node_pool_size(self) -> Callable[ - [cluster_service.SetNodePoolSizeRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_network_policy(self) -> Callable[ - [cluster_service.SetNetworkPolicyRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def set_maintenance_policy(self) -> Callable[ - [cluster_service.SetMaintenancePolicyRequest], - Union[ - cluster_service.Operation, - Awaitable[cluster_service.Operation] - ]]: - raise NotImplementedError() - - @property - def list_usable_subnetworks(self) -> Callable[ - [cluster_service.ListUsableSubnetworksRequest], - Union[ - cluster_service.ListUsableSubnetworksResponse, - Awaitable[cluster_service.ListUsableSubnetworksResponse] - ]]: - raise NotImplementedError() - - @property - def list_locations(self) -> Callable[ - [cluster_service.ListLocationsRequest], - Union[ - cluster_service.ListLocationsResponse, - Awaitable[cluster_service.ListLocationsResponse] - ]]: - raise NotImplementedError() - - -__all__ = ( - 'ClusterManagerTransport', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py deleted file mode 100644 index 59447074..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc.py +++ /dev/null @@ -1,1124 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers # type: ignore -from google.api_core import gapic_v1 # type: ignore -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.container_v1beta1.types import cluster_service -from google.protobuf import empty_pb2 # type: ignore -from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO - - -class ClusterManagerGrpcTransport(ClusterManagerTransport): - """gRPC backend transport for ClusterManager. - - Google Kubernetes Engine Cluster Manager v1beta1 - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
-
-    It sends protocol buffers over the wire using gRPC (which is built on
-    top of HTTP/2); the ``grpcio`` package must be installed.
-    """
-    _stubs: Dict[str, Callable]
-
-    def __init__(self, *,
-            host: str = 'container.googleapis.com',
-            credentials: ga_credentials.Credentials = None,
-            credentials_file: str = None,
-            scopes: Sequence[str] = None,
-            channel: grpc.Channel = None,
-            api_mtls_endpoint: str = None,
-            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
-            ssl_channel_credentials: grpc.ChannelCredentials = None,
-            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
-            quota_project_id: Optional[str] = None,
-            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
-            always_use_jwt_access: Optional[bool] = False,
-            ) -> None:
-        """Instantiate the transport.
-
-        Args:
-            host (Optional[str]):
-                 The hostname to connect to.
-            credentials (Optional[google.auth.credentials.Credentials]): The
-                authorization credentials to attach to requests. These
-                credentials identify the application to the service; if none
-                are specified, the client will attempt to ascertain the
-                credentials from the environment.
-                This argument is ignored if ``channel`` is provided.
-            credentials_file (Optional[str]): A file with credentials that can
-                be loaded with :func:`google.auth.load_credentials_from_file`.
-                This argument is ignored if ``channel`` is provided.
-            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
-                ignored if ``channel`` is provided.
-            channel (Optional[grpc.Channel]): A ``Channel`` instance through
-                which to make calls.
-            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
-                If provided, it overrides the ``host`` argument and tries to create
-                a mutual TLS channel with client SSL credentials from
-                ``client_cert_source`` or application default SSL credentials.
-            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                Deprecated. A callback to provide client SSL certificate bytes and
-                private key bytes, both in PEM format. It is ignored if
-                ``api_mtls_endpoint`` is None.
-            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
-                for the grpc channel. It is ignored if ``channel`` is provided.
-            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
-                A callback to provide client certificate bytes and private key bytes,
-                both in PEM format. It is used to configure a mutual TLS channel. It is
-                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
-            quota_project_id (Optional[str]): An optional project to use for billing
-                and quota.
-            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
-                The client info used to send a user-agent string along with
-                API requests. If ``None``, then default info will be used.
-                Generally, you only need to set this if you're developing
-                your own client library.
-            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
-                be used for service account credentials.
-
-        Raises:
-          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
-              creation failed for any reason.
-          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
-              and ``credentials_file`` are passed.
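-
-        Example:
-            A sketch of injecting a pre-created channel, in which case the
-            credential arguments are ignored; the local endpoint below is a
-            placeholder of the kind used in tests, not a real service
-            address::
-
-                channel = grpc.insecure_channel("localhost:8080")
-                transport = ClusterManagerGrpcTransport(channel=channel)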
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'container.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def list_clusters(self) -> Callable[ - [cluster_service.ListClustersRequest], - cluster_service.ListClustersResponse]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all clusters owned by a project in either the - specified zone or all zones. - - Returns: - Callable[[~.ListClustersRequest], - ~.ListClustersResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListClusters', - request_serializer=cluster_service.ListClustersRequest.serialize, - response_deserializer=cluster_service.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def get_cluster(self) -> Callable[ - [cluster_service.GetClusterRequest], - cluster_service.Cluster]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the details for a specific cluster. - - Returns: - Callable[[~.GetClusterRequest], - ~.Cluster]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetCluster', - request_serializer=cluster_service.GetClusterRequest.serialize, - response_deserializer=cluster_service.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def create_cluster(self) -> Callable[ - [cluster_service.CreateClusterRequest], - cluster_service.Operation]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Returns: - Callable[[~.CreateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CreateCluster', - request_serializer=cluster_service.CreateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [cluster_service.UpdateClusterRequest], - cluster_service.Operation]: - r"""Return a callable for the update cluster method over gRPC. - - Updates the settings for a specific cluster. - - Returns: - Callable[[~.UpdateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/UpdateCluster', - request_serializer=cluster_service.UpdateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_cluster'] - - @property - def update_node_pool(self) -> Callable[ - [cluster_service.UpdateNodePoolRequest], - cluster_service.Operation]: - r"""Return a callable for the update node pool method over gRPC. - - Updates the version and/or image type of a specific - node pool. - - Returns: - Callable[[~.UpdateNodePoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_node_pool' not in self._stubs: - self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/UpdateNodePool', - request_serializer=cluster_service.UpdateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_node_pool'] - - @property - def set_node_pool_autoscaling(self) -> Callable[ - [cluster_service.SetNodePoolAutoscalingRequest], - cluster_service.Operation]: - r"""Return a callable for the set node pool autoscaling method over gRPC. - - Sets the autoscaling settings of a specific node - pool. - - Returns: - Callable[[~.SetNodePoolAutoscalingRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_autoscaling' not in self._stubs: - self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling', - request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_autoscaling'] - - @property - def set_logging_service(self) -> Callable[ - [cluster_service.SetLoggingServiceRequest], - cluster_service.Operation]: - r"""Return a callable for the set logging service method over gRPC. - - Sets the logging service for a specific cluster. 
- - Returns: - Callable[[~.SetLoggingServiceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_logging_service' not in self._stubs: - self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLoggingService', - request_serializer=cluster_service.SetLoggingServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_logging_service'] - - @property - def set_monitoring_service(self) -> Callable[ - [cluster_service.SetMonitoringServiceRequest], - cluster_service.Operation]: - r"""Return a callable for the set monitoring service method over gRPC. - - Sets the monitoring service for a specific cluster. - - Returns: - Callable[[~.SetMonitoringServiceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_monitoring_service' not in self._stubs: - self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetMonitoringService', - request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_monitoring_service'] - - @property - def set_addons_config(self) -> Callable[ - [cluster_service.SetAddonsConfigRequest], - cluster_service.Operation]: - r"""Return a callable for the set addons config method over gRPC. - - Sets the addons for a specific cluster. - - Returns: - Callable[[~.SetAddonsConfigRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_addons_config' not in self._stubs: - self._stubs['set_addons_config'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetAddonsConfig', - request_serializer=cluster_service.SetAddonsConfigRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_addons_config'] - - @property - def set_locations(self) -> Callable[ - [cluster_service.SetLocationsRequest], - cluster_service.Operation]: - r"""Return a callable for the set locations method over gRPC. - - Sets the locations for a specific cluster. Deprecated. Use - `projects.locations.clusters.update `__ - instead. - - Returns: - Callable[[~.SetLocationsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'set_locations' not in self._stubs: - self._stubs['set_locations'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLocations', - request_serializer=cluster_service.SetLocationsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_locations'] - - @property - def update_master(self) -> Callable[ - [cluster_service.UpdateMasterRequest], - cluster_service.Operation]: - r"""Return a callable for the update master method over gRPC. - - Updates the master for a specific cluster. - - Returns: - Callable[[~.UpdateMasterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_master' not in self._stubs: - self._stubs['update_master'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/UpdateMaster', - request_serializer=cluster_service.UpdateMasterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_master'] - - @property - def set_master_auth(self) -> Callable[ - [cluster_service.SetMasterAuthRequest], - cluster_service.Operation]: - r"""Return a callable for the set master auth method over gRPC. - - Sets master auth materials. Currently supports - changing the admin password of a specific cluster, - either via password generation or explicitly setting the - password. - - Returns: - Callable[[~.SetMasterAuthRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_master_auth' not in self._stubs: - self._stubs['set_master_auth'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetMasterAuth', - request_serializer=cluster_service.SetMasterAuthRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_master_auth'] - - @property - def delete_cluster(self) -> Callable[ - [cluster_service.DeleteClusterRequest], - cluster_service.Operation]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted. - - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Returns: - Callable[[~.DeleteClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each.
- if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/DeleteCluster', - request_serializer=cluster_service.DeleteClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_cluster'] - - @property - def list_operations(self) -> Callable[ - [cluster_service.ListOperationsRequest], - cluster_service.ListOperationsResponse]: - r"""Return a callable for the list operations method over gRPC. - - Lists all operations in a project in the specified - zone or all zones. - - Returns: - Callable[[~.ListOperationsRequest], - ~.ListOperationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_operations' not in self._stubs: - self._stubs['list_operations'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListOperations', - request_serializer=cluster_service.ListOperationsRequest.serialize, - response_deserializer=cluster_service.ListOperationsResponse.deserialize, - ) - return self._stubs['list_operations'] - - @property - def get_operation(self) -> Callable[ - [cluster_service.GetOperationRequest], - cluster_service.Operation]: - r"""Return a callable for the get operation method over gRPC. - - Gets the specified operation. - - Returns: - Callable[[~.GetOperationRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_operation' not in self._stubs: - self._stubs['get_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetOperation', - request_serializer=cluster_service.GetOperationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['get_operation'] - - @property - def cancel_operation(self) -> Callable[ - [cluster_service.CancelOperationRequest], - empty_pb2.Empty]: - r"""Return a callable for the cancel operation method over gRPC. - - Cancels the specified operation. - - Returns: - Callable[[~.CancelOperationRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_operation' not in self._stubs: - self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CancelOperation', - request_serializer=cluster_service.CancelOperationRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_operation'] - - @property - def get_server_config(self) -> Callable[ - [cluster_service.GetServerConfigRequest], - cluster_service.ServerConfig]: - r"""Return a callable for the get server config method over gRPC. - - Returns configuration info about the Google - Kubernetes Engine service. 
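Note the deserializer used by cancel_operation above: request messages in this package are proto-plus types exposing serialize/deserialize helpers, while the response is the raw protobuf Empty, which is decoded with the generated FromString classmethod. A short sketch of the distinction:

    from google.protobuf import empty_pb2

    # Raw protobuf responses (here Empty) are decoded straight from bytes:
    empty = empty_pb2.Empty.FromString(b'')
    # whereas the cluster_service request types are proto-plus messages whose
    # serialize()/deserialize() classmethods wrap the same wire format.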
- - Returns: - Callable[[~.GetServerConfigRequest], - ~.ServerConfig]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_server_config' not in self._stubs: - self._stubs['get_server_config'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetServerConfig', - request_serializer=cluster_service.GetServerConfigRequest.serialize, - response_deserializer=cluster_service.ServerConfig.deserialize, - ) - return self._stubs['get_server_config'] - - @property - def list_node_pools(self) -> Callable[ - [cluster_service.ListNodePoolsRequest], - cluster_service.ListNodePoolsResponse]: - r"""Return a callable for the list node pools method over gRPC. - - Lists the node pools for a cluster. - - Returns: - Callable[[~.ListNodePoolsRequest], - ~.ListNodePoolsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_node_pools' not in self._stubs: - self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListNodePools', - request_serializer=cluster_service.ListNodePoolsRequest.serialize, - response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, - ) - return self._stubs['list_node_pools'] - - @property - def get_json_web_keys(self) -> Callable[ - [cluster_service.GetJSONWebKeysRequest], - cluster_service.GetJSONWebKeysResponse]: - r"""Return a callable for the get json web keys method over gRPC. - - Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Returns: - Callable[[~.GetJSONWebKeysRequest], - ~.GetJSONWebKeysResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_json_web_keys' not in self._stubs: - self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetJSONWebKeys', - request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, - response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, - ) - return self._stubs['get_json_web_keys'] - - @property - def get_node_pool(self) -> Callable[ - [cluster_service.GetNodePoolRequest], - cluster_service.NodePool]: - r"""Return a callable for the get node pool method over gRPC. - - Retrieves the requested node pool. - - Returns: - Callable[[~.GetNodePoolRequest], - ~.NodePool]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_node_pool' not in self._stubs: - self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetNodePool', - request_serializer=cluster_service.GetNodePoolRequest.serialize, - response_deserializer=cluster_service.NodePool.deserialize, - ) - return self._stubs['get_node_pool'] - - @property - def create_node_pool(self) -> Callable[ - [cluster_service.CreateNodePoolRequest], - cluster_service.Operation]: - r"""Return a callable for the create node pool method over gRPC. - - Creates a node pool for a cluster. - - Returns: - Callable[[~.CreateNodePoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_node_pool' not in self._stubs: - self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CreateNodePool', - request_serializer=cluster_service.CreateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_node_pool'] - - @property - def delete_node_pool(self) -> Callable[ - [cluster_service.DeleteNodePoolRequest], - cluster_service.Operation]: - r"""Return a callable for the delete node pool method over gRPC. - - Deletes a node pool from a cluster. - - Returns: - Callable[[~.DeleteNodePoolRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_node_pool' not in self._stubs: - self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/DeleteNodePool', - request_serializer=cluster_service.DeleteNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_node_pool'] - - @property - def rollback_node_pool_upgrade(self) -> Callable[ - [cluster_service.RollbackNodePoolUpgradeRequest], - cluster_service.Operation]: - r"""Return a callable for the rollback node pool upgrade method over gRPC. - - Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Returns: - Callable[[~.RollbackNodePoolUpgradeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'rollback_node_pool_upgrade' not in self._stubs: - self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade', - request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['rollback_node_pool_upgrade'] - - @property - def set_node_pool_management(self) -> Callable[ - [cluster_service.SetNodePoolManagementRequest], - cluster_service.Operation]: - r"""Return a callable for the set node pool management method over gRPC. 
- - Sets the NodeManagement options for a node pool. - - Returns: - Callable[[~.SetNodePoolManagementRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_management' not in self._stubs: - self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNodePoolManagement', - request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_management'] - - @property - def set_labels(self) -> Callable[ - [cluster_service.SetLabelsRequest], - cluster_service.Operation]: - r"""Return a callable for the set labels method over gRPC. - - Sets labels on a cluster. - - Returns: - Callable[[~.SetLabelsRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_labels' not in self._stubs: - self._stubs['set_labels'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLabels', - request_serializer=cluster_service.SetLabelsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_labels'] - - @property - def set_legacy_abac(self) -> Callable[ - [cluster_service.SetLegacyAbacRequest], - cluster_service.Operation]: - r"""Return a callable for the set legacy abac method over gRPC. - - Enables or disables the ABAC authorization mechanism - on a cluster. - - Returns: - Callable[[~.SetLegacyAbacRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_legacy_abac' not in self._stubs: - self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLegacyAbac', - request_serializer=cluster_service.SetLegacyAbacRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_legacy_abac'] - - @property - def start_ip_rotation(self) -> Callable[ - [cluster_service.StartIPRotationRequest], - cluster_service.Operation]: - r"""Return a callable for the start ip rotation method over gRPC. - - Starts master IP rotation. - - Returns: - Callable[[~.StartIPRotationRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
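The setter RPCs above all resolve to cluster_service.Operation, a long-running operation handle rather than a final result. A hedged sketch of the usual polling loop, assuming the get_operation callable from earlier in this transport (the interval and request are illustrative):

    import time

    from google.container_v1beta1.types import cluster_service

    def wait_for_done(get_operation, request, poll_seconds: float = 5.0):
        # Re-fetch the Operation until its status reports DONE.
        op = get_operation(request)
        while op.status != cluster_service.Operation.Status.DONE:
            time.sleep(poll_seconds)
            op = get_operation(request)
        return op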
- if 'start_ip_rotation' not in self._stubs: - self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/StartIPRotation', - request_serializer=cluster_service.StartIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['start_ip_rotation'] - - @property - def complete_ip_rotation(self) -> Callable[ - [cluster_service.CompleteIPRotationRequest], - cluster_service.Operation]: - r"""Return a callable for the complete ip rotation method over gRPC. - - Completes master IP rotation. - - Returns: - Callable[[~.CompleteIPRotationRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_ip_rotation' not in self._stubs: - self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CompleteIPRotation', - request_serializer=cluster_service.CompleteIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['complete_ip_rotation'] - - @property - def set_node_pool_size(self) -> Callable[ - [cluster_service.SetNodePoolSizeRequest], - cluster_service.Operation]: - r"""Return a callable for the set node pool size method over gRPC. - - Sets the size for a specific node pool. - - Returns: - Callable[[~.SetNodePoolSizeRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_size' not in self._stubs: - self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNodePoolSize', - request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_size'] - - @property - def set_network_policy(self) -> Callable[ - [cluster_service.SetNetworkPolicyRequest], - cluster_service.Operation]: - r"""Return a callable for the set network policy method over gRPC. - - Enables or disables Network Policy for a cluster. - - Returns: - Callable[[~.SetNetworkPolicyRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_network_policy' not in self._stubs: - self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNetworkPolicy', - request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_network_policy'] - - @property - def set_maintenance_policy(self) -> Callable[ - [cluster_service.SetMaintenancePolicyRequest], - cluster_service.Operation]: - r"""Return a callable for the set maintenance policy method over gRPC. - - Sets the maintenance policy for a cluster. 
- - Returns: - Callable[[~.SetMaintenancePolicyRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_maintenance_policy' not in self._stubs: - self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetMaintenancePolicy', - request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_maintenance_policy'] - - @property - def list_usable_subnetworks(self) -> Callable[ - [cluster_service.ListUsableSubnetworksRequest], - cluster_service.ListUsableSubnetworksResponse]: - r"""Return a callable for the list usable subnetworks method over gRPC. - - Lists subnetworks that can be used for creating - clusters in a project. - - Returns: - Callable[[~.ListUsableSubnetworksRequest], - ~.ListUsableSubnetworksResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_usable_subnetworks' not in self._stubs: - self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListUsableSubnetworks', - request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, - response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, - ) - return self._stubs['list_usable_subnetworks'] - - @property - def list_locations(self) -> Callable[ - [cluster_service.ListLocationsRequest], - cluster_service.ListLocationsResponse]: - r"""Return a callable for the list locations method over gRPC. - - Fetches locations that offer Google Kubernetes - Engine. - - Returns: - Callable[[~.ListLocationsRequest], - ~.ListLocationsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_locations' not in self._stubs: - self._stubs['list_locations'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListLocations', - request_serializer=cluster_service.ListLocationsRequest.serialize, - response_deserializer=cluster_service.ListLocationsResponse.deserialize, - ) - return self._stubs['list_locations'] - - -__all__ = ( - 'ClusterManagerGrpcTransport', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py b/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py deleted file mode 100644 index f2dd2dda..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/services/cluster_manager/transports/grpc_asyncio.py +++ /dev/null @@ -1,1128 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 # type: ignore -from google.api_core import grpc_helpers_async # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -import packaging.version - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.container_v1beta1.types import cluster_service -from google.protobuf import empty_pb2 # type: ignore -from .base import ClusterManagerTransport, DEFAULT_CLIENT_INFO -from .grpc import ClusterManagerGrpcTransport - - -class ClusterManagerGrpcAsyncIOTransport(ClusterManagerTransport): - """gRPC AsyncIO backend transport for ClusterManager. - - Google Kubernetes Engine Cluster Manager v1beta1 - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'container.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): An optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object.
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'container.googleapis.com', - credentials: ga_credentials.Credentials = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to. - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or applicatin default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - credentials=self._credentials, - credentials_file=credentials_file, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def list_clusters(self) -> Callable[ - [cluster_service.ListClustersRequest], - Awaitable[cluster_service.ListClustersResponse]]: - r"""Return a callable for the list clusters method over gRPC. - - Lists all clusters owned by a project in either the - specified zone or all zones. - - Returns: - Callable[[~.ListClustersRequest], - Awaitable[~.ListClustersResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListClusters', - request_serializer=cluster_service.ListClustersRequest.serialize, - response_deserializer=cluster_service.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def get_cluster(self) -> Callable[ - [cluster_service.GetClusterRequest], - Awaitable[cluster_service.Cluster]]: - r"""Return a callable for the get cluster method over gRPC. - - Gets the details for a specific cluster. 
- - Returns: - Callable[[~.GetClusterRequest], - Awaitable[~.Cluster]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetCluster', - request_serializer=cluster_service.GetClusterRequest.serialize, - response_deserializer=cluster_service.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def create_cluster(self) -> Callable[ - [cluster_service.CreateClusterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster, consisting of the specified number and type - of Google Compute Engine instances. - - By default, the cluster is created in the project's `default - network `__. - - One firewall is added for the cluster. After cluster creation, - the Kubelet creates routes for each node to allow the containers - on that node to communicate with all other instances in the - cluster. - - Finally, an entry is added to the project's global metadata - indicating which CIDR range the cluster is using. - - Returns: - Callable[[~.CreateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CreateCluster', - request_serializer=cluster_service.CreateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_cluster'] - - @property - def update_cluster(self) -> Callable[ - [cluster_service.UpdateClusterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the update cluster method over gRPC. - - Updates the settings for a specific cluster. - - Returns: - Callable[[~.UpdateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/UpdateCluster', - request_serializer=cluster_service.UpdateClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_cluster'] - - @property - def update_node_pool(self) -> Callable[ - [cluster_service.UpdateNodePoolRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the update node pool method over gRPC. - - Updates the version and/or image type of a specific - node pool. - - Returns: - Callable[[~.UpdateNodePoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_node_pool' not in self._stubs: - self._stubs['update_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/UpdateNodePool', - request_serializer=cluster_service.UpdateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_node_pool'] - - @property - def set_node_pool_autoscaling(self) -> Callable[ - [cluster_service.SetNodePoolAutoscalingRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set node pool autoscaling method over gRPC. - - Sets the autoscaling settings of a specific node - pool. - - Returns: - Callable[[~.SetNodePoolAutoscalingRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_autoscaling' not in self._stubs: - self._stubs['set_node_pool_autoscaling'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNodePoolAutoscaling', - request_serializer=cluster_service.SetNodePoolAutoscalingRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_autoscaling'] - - @property - def set_logging_service(self) -> Callable[ - [cluster_service.SetLoggingServiceRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set logging service method over gRPC. - - Sets the logging service for a specific cluster. - - Returns: - Callable[[~.SetLoggingServiceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_logging_service' not in self._stubs: - self._stubs['set_logging_service'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLoggingService', - request_serializer=cluster_service.SetLoggingServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_logging_service'] - - @property - def set_monitoring_service(self) -> Callable[ - [cluster_service.SetMonitoringServiceRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set monitoring service method over gRPC. - - Sets the monitoring service for a specific cluster. - - Returns: - Callable[[~.SetMonitoringServiceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'set_monitoring_service' not in self._stubs: - self._stubs['set_monitoring_service'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetMonitoringService', - request_serializer=cluster_service.SetMonitoringServiceRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_monitoring_service'] - - @property - def set_addons_config(self) -> Callable[ - [cluster_service.SetAddonsConfigRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set addons config method over gRPC. - - Sets the addons for a specific cluster. - - Returns: - Callable[[~.SetAddonsConfigRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_addons_config' not in self._stubs: - self._stubs['set_addons_config'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetAddonsConfig', - request_serializer=cluster_service.SetAddonsConfigRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_addons_config'] - - @property - def set_locations(self) -> Callable[ - [cluster_service.SetLocationsRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set locations method over gRPC. - - Sets the locations for a specific cluster. Deprecated. Use - `projects.locations.clusters.update `__ - instead. - - Returns: - Callable[[~.SetLocationsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_locations' not in self._stubs: - self._stubs['set_locations'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLocations', - request_serializer=cluster_service.SetLocationsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_locations'] - - @property - def update_master(self) -> Callable[ - [cluster_service.UpdateMasterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the update master method over gRPC. - - Updates the master for a specific cluster. - - Returns: - Callable[[~.UpdateMasterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_master' not in self._stubs: - self._stubs['update_master'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/UpdateMaster', - request_serializer=cluster_service.UpdateMasterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['update_master'] - - @property - def set_master_auth(self) -> Callable[ - [cluster_service.SetMasterAuthRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set master auth method over gRPC. - - Sets master auth materials. 
Currently supports - changing the admin password of a specific cluster, - either via password generation or explicitly setting the - password. - - Returns: - Callable[[~.SetMasterAuthRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_master_auth' not in self._stubs: - self._stubs['set_master_auth'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetMasterAuth', - request_serializer=cluster_service.SetMasterAuthRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_master_auth'] - - @property - def delete_cluster(self) -> Callable[ - [cluster_service.DeleteClusterRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes the cluster, including the Kubernetes - endpoint and all worker nodes. - - Firewalls and routes that were configured during cluster - creation are also deleted. - - Other Google Compute Engine resources that might be in - use by the cluster, such as load balancer resources, are - not deleted if they weren't present when the cluster was - initially created. - - Returns: - Callable[[~.DeleteClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/DeleteCluster', - request_serializer=cluster_service.DeleteClusterRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_cluster'] - - @property - def list_operations(self) -> Callable[ - [cluster_service.ListOperationsRequest], - Awaitable[cluster_service.ListOperationsResponse]]: - r"""Return a callable for the list operations method over gRPC. - - Lists all operations in a project in the specified - zone or all zones. - - Returns: - Callable[[~.ListOperationsRequest], - Awaitable[~.ListOperationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_operations' not in self._stubs: - self._stubs['list_operations'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListOperations', - request_serializer=cluster_service.ListOperationsRequest.serialize, - response_deserializer=cluster_service.ListOperationsResponse.deserialize, - ) - return self._stubs['list_operations'] - - @property - def get_operation(self) -> Callable[ - [cluster_service.GetOperationRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the get operation method over gRPC. - - Gets the specified operation. - - Returns: - Callable[[~.GetOperationRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server.
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_operation' not in self._stubs: - self._stubs['get_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetOperation', - request_serializer=cluster_service.GetOperationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['get_operation'] - - @property - def cancel_operation(self) -> Callable[ - [cluster_service.CancelOperationRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the cancel operation method over gRPC. - - Cancels the specified operation. - - Returns: - Callable[[~.CancelOperationRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'cancel_operation' not in self._stubs: - self._stubs['cancel_operation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CancelOperation', - request_serializer=cluster_service.CancelOperationRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['cancel_operation'] - - @property - def get_server_config(self) -> Callable[ - [cluster_service.GetServerConfigRequest], - Awaitable[cluster_service.ServerConfig]]: - r"""Return a callable for the get server config method over gRPC. - - Returns configuration info about the Google - Kubernetes Engine service. - - Returns: - Callable[[~.GetServerConfigRequest], - Awaitable[~.ServerConfig]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_server_config' not in self._stubs: - self._stubs['get_server_config'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetServerConfig', - request_serializer=cluster_service.GetServerConfigRequest.serialize, - response_deserializer=cluster_service.ServerConfig.deserialize, - ) - return self._stubs['get_server_config'] - - @property - def list_node_pools(self) -> Callable[ - [cluster_service.ListNodePoolsRequest], - Awaitable[cluster_service.ListNodePoolsResponse]]: - r"""Return a callable for the list node pools method over gRPC. - - Lists the node pools for a cluster. - - Returns: - Callable[[~.ListNodePoolsRequest], - Awaitable[~.ListNodePoolsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_node_pools' not in self._stubs: - self._stubs['list_node_pools'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListNodePools', - request_serializer=cluster_service.ListNodePoolsRequest.serialize, - response_deserializer=cluster_service.ListNodePoolsResponse.deserialize, - ) - return self._stubs['list_node_pools'] - - @property - def get_json_web_keys(self) -> Callable[ - [cluster_service.GetJSONWebKeysRequest], - Awaitable[cluster_service.GetJSONWebKeysResponse]]: - r"""Return a callable for the get json web keys method over gRPC. - - Gets the public component of the cluster signing keys - in JSON Web Key format. - This API is not yet intended for general use, and is not - available for all clusters. - - Returns: - Callable[[~.GetJSONWebKeysRequest], - Awaitable[~.GetJSONWebKeysResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_json_web_keys' not in self._stubs: - self._stubs['get_json_web_keys'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetJSONWebKeys', - request_serializer=cluster_service.GetJSONWebKeysRequest.serialize, - response_deserializer=cluster_service.GetJSONWebKeysResponse.deserialize, - ) - return self._stubs['get_json_web_keys'] - - @property - def get_node_pool(self) -> Callable[ - [cluster_service.GetNodePoolRequest], - Awaitable[cluster_service.NodePool]]: - r"""Return a callable for the get node pool method over gRPC. - - Retrieves the requested node pool. - - Returns: - Callable[[~.GetNodePoolRequest], - Awaitable[~.NodePool]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_node_pool' not in self._stubs: - self._stubs['get_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/GetNodePool', - request_serializer=cluster_service.GetNodePoolRequest.serialize, - response_deserializer=cluster_service.NodePool.deserialize, - ) - return self._stubs['get_node_pool'] - - @property - def create_node_pool(self) -> Callable[ - [cluster_service.CreateNodePoolRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the create node pool method over gRPC. - - Creates a node pool for a cluster. - - Returns: - Callable[[~.CreateNodePoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
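The request_serializer/response_deserializer hooks registered above are the proto-plus classmethods on each message type; they round-trip cleanly through bytes. For instance (the field value is illustrative):

    from google.container_v1beta1.types import cluster_service

    req = cluster_service.GetNodePoolRequest(
        name='projects/p/locations/l/clusters/c/nodePools/np',
    )
    data = cluster_service.GetNodePoolRequest.serialize(req)      # wire bytes
    back = cluster_service.GetNodePoolRequest.deserialize(data)   # same message
    assert back.name == req.name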
- if 'create_node_pool' not in self._stubs: - self._stubs['create_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CreateNodePool', - request_serializer=cluster_service.CreateNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['create_node_pool'] - - @property - def delete_node_pool(self) -> Callable[ - [cluster_service.DeleteNodePoolRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the delete node pool method over gRPC. - - Deletes a node pool from a cluster. - - Returns: - Callable[[~.DeleteNodePoolRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_node_pool' not in self._stubs: - self._stubs['delete_node_pool'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/DeleteNodePool', - request_serializer=cluster_service.DeleteNodePoolRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['delete_node_pool'] - - @property - def rollback_node_pool_upgrade(self) -> Callable[ - [cluster_service.RollbackNodePoolUpgradeRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the rollback node pool upgrade method over gRPC. - - Rolls back a previously Aborted or Failed NodePool - upgrade. This makes no changes if the last upgrade - successfully completed. - - Returns: - Callable[[~.RollbackNodePoolUpgradeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'rollback_node_pool_upgrade' not in self._stubs: - self._stubs['rollback_node_pool_upgrade'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/RollbackNodePoolUpgrade', - request_serializer=cluster_service.RollbackNodePoolUpgradeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['rollback_node_pool_upgrade'] - - @property - def set_node_pool_management(self) -> Callable[ - [cluster_service.SetNodePoolManagementRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set node pool management method over gRPC. - - Sets the NodeManagement options for a node pool. - - Returns: - Callable[[~.SetNodePoolManagementRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'set_node_pool_management' not in self._stubs: - self._stubs['set_node_pool_management'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNodePoolManagement', - request_serializer=cluster_service.SetNodePoolManagementRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_management'] - - @property - def set_labels(self) -> Callable[ - [cluster_service.SetLabelsRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set labels method over gRPC. - - Sets labels on a cluster. - - Returns: - Callable[[~.SetLabelsRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_labels' not in self._stubs: - self._stubs['set_labels'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLabels', - request_serializer=cluster_service.SetLabelsRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_labels'] - - @property - def set_legacy_abac(self) -> Callable[ - [cluster_service.SetLegacyAbacRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set legacy abac method over gRPC. - - Enables or disables the ABAC authorization mechanism - on a cluster. - - Returns: - Callable[[~.SetLegacyAbacRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_legacy_abac' not in self._stubs: - self._stubs['set_legacy_abac'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetLegacyAbac', - request_serializer=cluster_service.SetLegacyAbacRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_legacy_abac'] - - @property - def start_ip_rotation(self) -> Callable[ - [cluster_service.StartIPRotationRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the start ip rotation method over gRPC. - - Starts master IP rotation. - - Returns: - Callable[[~.StartIPRotationRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'start_ip_rotation' not in self._stubs: - self._stubs['start_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/StartIPRotation', - request_serializer=cluster_service.StartIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['start_ip_rotation'] - - @property - def complete_ip_rotation(self) -> Callable[ - [cluster_service.CompleteIPRotationRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the complete ip rotation method over gRPC. - - Completes master IP rotation. 
- - Returns: - Callable[[~.CompleteIPRotationRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'complete_ip_rotation' not in self._stubs: - self._stubs['complete_ip_rotation'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/CompleteIPRotation', - request_serializer=cluster_service.CompleteIPRotationRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['complete_ip_rotation'] - - @property - def set_node_pool_size(self) -> Callable[ - [cluster_service.SetNodePoolSizeRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set node pool size method over gRPC. - - Sets the size for a specific node pool. - - Returns: - Callable[[~.SetNodePoolSizeRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_node_pool_size' not in self._stubs: - self._stubs['set_node_pool_size'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNodePoolSize', - request_serializer=cluster_service.SetNodePoolSizeRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_node_pool_size'] - - @property - def set_network_policy(self) -> Callable[ - [cluster_service.SetNetworkPolicyRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set network policy method over gRPC. - - Enables or disables Network Policy for a cluster. - - Returns: - Callable[[~.SetNetworkPolicyRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_network_policy' not in self._stubs: - self._stubs['set_network_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetNetworkPolicy', - request_serializer=cluster_service.SetNetworkPolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_network_policy'] - - @property - def set_maintenance_policy(self) -> Callable[ - [cluster_service.SetMaintenancePolicyRequest], - Awaitable[cluster_service.Operation]]: - r"""Return a callable for the set maintenance policy method over gRPC. - - Sets the maintenance policy for a cluster. - - Returns: - Callable[[~.SetMaintenancePolicyRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'set_maintenance_policy' not in self._stubs: - self._stubs['set_maintenance_policy'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/SetMaintenancePolicy', - request_serializer=cluster_service.SetMaintenancePolicyRequest.serialize, - response_deserializer=cluster_service.Operation.deserialize, - ) - return self._stubs['set_maintenance_policy'] - - @property - def list_usable_subnetworks(self) -> Callable[ - [cluster_service.ListUsableSubnetworksRequest], - Awaitable[cluster_service.ListUsableSubnetworksResponse]]: - r"""Return a callable for the list usable subnetworks method over gRPC. - - Lists subnetworks that can be used for creating - clusters in a project. - - Returns: - Callable[[~.ListUsableSubnetworksRequest], - Awaitable[~.ListUsableSubnetworksResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_usable_subnetworks' not in self._stubs: - self._stubs['list_usable_subnetworks'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListUsableSubnetworks', - request_serializer=cluster_service.ListUsableSubnetworksRequest.serialize, - response_deserializer=cluster_service.ListUsableSubnetworksResponse.deserialize, - ) - return self._stubs['list_usable_subnetworks'] - - @property - def list_locations(self) -> Callable[ - [cluster_service.ListLocationsRequest], - Awaitable[cluster_service.ListLocationsResponse]]: - r"""Return a callable for the list locations method over gRPC. - - Fetches locations that offer Google Kubernetes - Engine. - - Returns: - Callable[[~.ListLocationsRequest], - Awaitable[~.ListLocationsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_locations' not in self._stubs: - self._stubs['list_locations'] = self.grpc_channel.unary_unary( - '/google.container.v1beta1.ClusterManager/ListLocations', - request_serializer=cluster_service.ListLocationsRequest.serialize, - response_deserializer=cluster_service.ListLocationsResponse.deserialize, - ) - return self._stubs['list_locations'] - - -__all__ = ( - 'ClusterManagerGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py b/owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py deleted file mode 100644 index 6184d07a..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/types/__init__.py +++ /dev/null @@ -1,246 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
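Every RPC property on the async transport above repeats one lazy, memoized-stub pattern: the callable is built from the channel on first access and cached in ``self._stubs``, so each method gets exactly one channel-bound stub. A minimal standalone sketch of that pattern, assuming only a pre-built ``grpc.aio`` channel (the class name and wiring here are illustrative, not part of the generated surface)::

    import grpc

    from google.container_v1beta1.types import cluster_service


    class LazyStubSketch:
        """Illustrative only: caches one unary-unary stub per RPC method."""

        def __init__(self, channel: grpc.aio.Channel):
            self.grpc_channel = channel
            self._stubs = {}

        @property
        def list_locations(self):
            # Built once, then reused: repeated attribute access returns the
            # same channel-bound callable instead of creating a fresh stub.
            if 'list_locations' not in self._stubs:
                self._stubs['list_locations'] = self.grpc_channel.unary_unary(
                    '/google.container.v1beta1.ClusterManager/ListLocations',
                    request_serializer=cluster_service.ListLocationsRequest.serialize,
                    response_deserializer=cluster_service.ListLocationsResponse.deserialize,
                )
            return self._stubs['list_locations']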
-# -from .cluster_service import ( - AcceleratorConfig, - AddonsConfig, - AuthenticatorGroupsConfig, - AutoprovisioningNodePoolDefaults, - AutoUpgradeOptions, - BinaryAuthorization, - CancelOperationRequest, - ClientCertificateConfig, - CloudRunConfig, - Cluster, - ClusterAutoscaling, - ClusterTelemetry, - ClusterUpdate, - CompleteIPRotationRequest, - ConfidentialNodes, - ConfigConnectorConfig, - CreateClusterRequest, - CreateNodePoolRequest, - DailyMaintenanceWindow, - DatabaseEncryption, - DefaultSnatStatus, - DeleteClusterRequest, - DeleteNodePoolRequest, - DnsCacheConfig, - EphemeralStorageConfig, - GcePersistentDiskCsiDriverConfig, - GetClusterRequest, - GetJSONWebKeysRequest, - GetJSONWebKeysResponse, - GetNodePoolRequest, - GetOpenIDConfigRequest, - GetOpenIDConfigResponse, - GetOperationRequest, - GetServerConfigRequest, - HorizontalPodAutoscaling, - HttpLoadBalancing, - IntraNodeVisibilityConfig, - IPAllocationPolicy, - IstioConfig, - Jwk, - KalmConfig, - KubernetesDashboard, - LegacyAbac, - LinuxNodeConfig, - ListClustersRequest, - ListClustersResponse, - ListLocationsRequest, - ListLocationsResponse, - ListNodePoolsRequest, - ListNodePoolsResponse, - ListOperationsRequest, - ListOperationsResponse, - ListUsableSubnetworksRequest, - ListUsableSubnetworksResponse, - Location, - MaintenancePolicy, - MaintenanceWindow, - Master, - MasterAuth, - MasterAuthorizedNetworksConfig, - MaxPodsConstraint, - NetworkConfig, - NetworkPolicy, - NetworkPolicyConfig, - NodeConfig, - NodeKubeletConfig, - NodeManagement, - NodePool, - NodePoolAutoscaling, - NodeTaint, - NotificationConfig, - Operation, - OperationProgress, - PodSecurityPolicyConfig, - PrivateClusterConfig, - PrivateClusterMasterGlobalAccessConfig, - RecurringTimeWindow, - ReleaseChannel, - ReservationAffinity, - ResourceLimit, - ResourceUsageExportConfig, - RollbackNodePoolUpgradeRequest, - SandboxConfig, - ServerConfig, - SetAddonsConfigRequest, - SetLabelsRequest, - SetLegacyAbacRequest, - SetLocationsRequest, - SetLoggingServiceRequest, - SetMaintenancePolicyRequest, - SetMasterAuthRequest, - SetMonitoringServiceRequest, - SetNetworkPolicyRequest, - SetNodePoolAutoscalingRequest, - SetNodePoolManagementRequest, - SetNodePoolSizeRequest, - ShieldedInstanceConfig, - ShieldedNodes, - StartIPRotationRequest, - StatusCondition, - TimeWindow, - TpuConfig, - UpdateClusterRequest, - UpdateMasterRequest, - UpdateNodePoolRequest, - UpgradeEvent, - UsableSubnetwork, - UsableSubnetworkSecondaryRange, - VerticalPodAutoscaling, - WorkloadIdentityConfig, - WorkloadMetadataConfig, - DatapathProvider, - UpgradeResourceType, -) - -__all__ = ( - 'AcceleratorConfig', - 'AddonsConfig', - 'AuthenticatorGroupsConfig', - 'AutoprovisioningNodePoolDefaults', - 'AutoUpgradeOptions', - 'BinaryAuthorization', - 'CancelOperationRequest', - 'ClientCertificateConfig', - 'CloudRunConfig', - 'Cluster', - 'ClusterAutoscaling', - 'ClusterTelemetry', - 'ClusterUpdate', - 'CompleteIPRotationRequest', - 'ConfidentialNodes', - 'ConfigConnectorConfig', - 'CreateClusterRequest', - 'CreateNodePoolRequest', - 'DailyMaintenanceWindow', - 'DatabaseEncryption', - 'DefaultSnatStatus', - 'DeleteClusterRequest', - 'DeleteNodePoolRequest', - 'DnsCacheConfig', - 'EphemeralStorageConfig', - 'GcePersistentDiskCsiDriverConfig', - 'GetClusterRequest', - 'GetJSONWebKeysRequest', - 'GetJSONWebKeysResponse', - 'GetNodePoolRequest', - 'GetOpenIDConfigRequest', - 'GetOpenIDConfigResponse', - 'GetOperationRequest', - 'GetServerConfigRequest', - 'HorizontalPodAutoscaling', - 
'HttpLoadBalancing', - 'IntraNodeVisibilityConfig', - 'IPAllocationPolicy', - 'IstioConfig', - 'Jwk', - 'KalmConfig', - 'KubernetesDashboard', - 'LegacyAbac', - 'LinuxNodeConfig', - 'ListClustersRequest', - 'ListClustersResponse', - 'ListLocationsRequest', - 'ListLocationsResponse', - 'ListNodePoolsRequest', - 'ListNodePoolsResponse', - 'ListOperationsRequest', - 'ListOperationsResponse', - 'ListUsableSubnetworksRequest', - 'ListUsableSubnetworksResponse', - 'Location', - 'MaintenancePolicy', - 'MaintenanceWindow', - 'Master', - 'MasterAuth', - 'MasterAuthorizedNetworksConfig', - 'MaxPodsConstraint', - 'NetworkConfig', - 'NetworkPolicy', - 'NetworkPolicyConfig', - 'NodeConfig', - 'NodeKubeletConfig', - 'NodeManagement', - 'NodePool', - 'NodePoolAutoscaling', - 'NodeTaint', - 'NotificationConfig', - 'Operation', - 'OperationProgress', - 'PodSecurityPolicyConfig', - 'PrivateClusterConfig', - 'PrivateClusterMasterGlobalAccessConfig', - 'RecurringTimeWindow', - 'ReleaseChannel', - 'ReservationAffinity', - 'ResourceLimit', - 'ResourceUsageExportConfig', - 'RollbackNodePoolUpgradeRequest', - 'SandboxConfig', - 'ServerConfig', - 'SetAddonsConfigRequest', - 'SetLabelsRequest', - 'SetLegacyAbacRequest', - 'SetLocationsRequest', - 'SetLoggingServiceRequest', - 'SetMaintenancePolicyRequest', - 'SetMasterAuthRequest', - 'SetMonitoringServiceRequest', - 'SetNetworkPolicyRequest', - 'SetNodePoolAutoscalingRequest', - 'SetNodePoolManagementRequest', - 'SetNodePoolSizeRequest', - 'ShieldedInstanceConfig', - 'ShieldedNodes', - 'StartIPRotationRequest', - 'StatusCondition', - 'TimeWindow', - 'TpuConfig', - 'UpdateClusterRequest', - 'UpdateMasterRequest', - 'UpdateNodePoolRequest', - 'UpgradeEvent', - 'UsableSubnetwork', - 'UsableSubnetworkSecondaryRange', - 'VerticalPodAutoscaling', - 'WorkloadIdentityConfig', - 'WorkloadMetadataConfig', - 'DatapathProvider', - 'UpgradeResourceType', -) diff --git a/owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py b/owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py deleted file mode 100644 index bec8d843..00000000 --- a/owl-bot-staging/v1beta1/google/container_v1beta1/types/cluster_service.py +++ /dev/null @@ -1,5866 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
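Everything re-exported through the ``__all__`` above is a proto-plus message or enum, so construction is plain keyword arguments and nested messages coerce from instances or dicts. A small hedged usage sketch (the field values are made up)::

    from google.container_v1beta1 import types

    cluster = types.Cluster(
        name='example-cluster',          # hypothetical name
        description='illustrative only',
        legacy_abac=types.LegacyAbac(enabled=False),
    )
    assert cluster.name == 'example-cluster'

    # proto-plus enums are IntEnum subclasses re-exported from the same
    # namespace as the messages.
    assert types.UpgradeResourceType.NODE_POOL == 2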
-# -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore -from google.rpc import code_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.container.v1beta1', - manifest={ - 'DatapathProvider', - 'UpgradeResourceType', - 'LinuxNodeConfig', - 'NodeKubeletConfig', - 'NodeConfig', - 'ShieldedInstanceConfig', - 'SandboxConfig', - 'EphemeralStorageConfig', - 'ReservationAffinity', - 'NodeTaint', - 'MasterAuth', - 'ClientCertificateConfig', - 'AddonsConfig', - 'HttpLoadBalancing', - 'HorizontalPodAutoscaling', - 'KubernetesDashboard', - 'NetworkPolicyConfig', - 'DnsCacheConfig', - 'KalmConfig', - 'ConfigConnectorConfig', - 'GcePersistentDiskCsiDriverConfig', - 'PrivateClusterMasterGlobalAccessConfig', - 'PrivateClusterConfig', - 'IstioConfig', - 'CloudRunConfig', - 'MasterAuthorizedNetworksConfig', - 'LegacyAbac', - 'NetworkPolicy', - 'IPAllocationPolicy', - 'BinaryAuthorization', - 'PodSecurityPolicyConfig', - 'AuthenticatorGroupsConfig', - 'ClusterTelemetry', - 'Cluster', - 'ClusterUpdate', - 'Operation', - 'OperationProgress', - 'CreateClusterRequest', - 'GetClusterRequest', - 'UpdateClusterRequest', - 'UpdateNodePoolRequest', - 'SetNodePoolAutoscalingRequest', - 'SetLoggingServiceRequest', - 'SetMonitoringServiceRequest', - 'SetAddonsConfigRequest', - 'SetLocationsRequest', - 'UpdateMasterRequest', - 'SetMasterAuthRequest', - 'DeleteClusterRequest', - 'ListClustersRequest', - 'ListClustersResponse', - 'GetOperationRequest', - 'ListOperationsRequest', - 'CancelOperationRequest', - 'ListOperationsResponse', - 'GetServerConfigRequest', - 'ServerConfig', - 'CreateNodePoolRequest', - 'DeleteNodePoolRequest', - 'ListNodePoolsRequest', - 'GetNodePoolRequest', - 'NodePool', - 'NodeManagement', - 'AutoUpgradeOptions', - 'MaintenancePolicy', - 'MaintenanceWindow', - 'TimeWindow', - 'RecurringTimeWindow', - 'DailyMaintenanceWindow', - 'SetNodePoolManagementRequest', - 'SetNodePoolSizeRequest', - 'RollbackNodePoolUpgradeRequest', - 'ListNodePoolsResponse', - 'ClusterAutoscaling', - 'AutoprovisioningNodePoolDefaults', - 'ResourceLimit', - 'NodePoolAutoscaling', - 'SetLabelsRequest', - 'SetLegacyAbacRequest', - 'StartIPRotationRequest', - 'CompleteIPRotationRequest', - 'AcceleratorConfig', - 'WorkloadMetadataConfig', - 'SetNetworkPolicyRequest', - 'SetMaintenancePolicyRequest', - 'ListLocationsRequest', - 'ListLocationsResponse', - 'Location', - 'StatusCondition', - 'NetworkConfig', - 'ListUsableSubnetworksRequest', - 'ListUsableSubnetworksResponse', - 'UsableSubnetworkSecondaryRange', - 'UsableSubnetwork', - 'VerticalPodAutoscaling', - 'DefaultSnatStatus', - 'IntraNodeVisibilityConfig', - 'MaxPodsConstraint', - 'WorkloadIdentityConfig', - 'DatabaseEncryption', - 'ResourceUsageExportConfig', - 'ShieldedNodes', - 'GetOpenIDConfigRequest', - 'GetOpenIDConfigResponse', - 'GetJSONWebKeysRequest', - 'Jwk', - 'GetJSONWebKeysResponse', - 'ReleaseChannel', - 'TpuConfig', - 'Master', - 'NotificationConfig', - 'ConfidentialNodes', - 'UpgradeEvent', - }, -) - - -class DatapathProvider(proto.Enum): - r"""The datapath provider selects the implementation of the - Kubernetes networking model for service resolution and - network policy enforcement. - """ - DATAPATH_PROVIDER_UNSPECIFIED = 0 - LEGACY_DATAPATH = 1 - ADVANCED_DATAPATH = 2 - - -class UpgradeResourceType(proto.Enum): - r"""UpgradeResourceType is the resource type that is upgrading.
- It is used in upgrade notifications. - """ - UPGRADE_RESOURCE_TYPE_UNSPECIFIED = 0 - MASTER = 1 - NODE_POOL = 2 - - -class LinuxNodeConfig(proto.Message): - r"""Parameters that can be configured on Linux nodes. - Attributes: - sysctls (Sequence[google.container_v1beta1.types.LinuxNodeConfig.SysctlsEntry]): - The Linux kernel parameters to be applied to the nodes and - all pods running on the nodes. - - The following parameters are supported. - - net.core.netdev_max_backlog net.core.rmem_max - net.core.wmem_default net.core.wmem_max net.core.optmem_max - net.core.somaxconn net.ipv4.tcp_rmem net.ipv4.tcp_wmem - net.ipv4.tcp_tw_reuse - """ - - sysctls = proto.MapField( - proto.STRING, - proto.STRING, - number=1, - ) - - -class NodeKubeletConfig(proto.Message): - r"""Node kubelet configs. - Attributes: - cpu_manager_policy (str): - Control the CPU management policy on the - node. See - https://kubernetes.io/docs/tasks/administer- cluster/cpu-management-policies/ - The following values are allowed. - - "none": the default, which represents the - existing scheduling behavior. - "static": - allows pods with certain resource - characteristics to be granted - increased CPU affinity and exclusivity on the - node. The default value is 'none' if - unspecified. - cpu_cfs_quota (google.protobuf.wrappers_pb2.BoolValue): - Enable CPU CFS quota enforcement for - containers that specify CPU limits. - This option is enabled by default which makes - kubelet use CFS quota - (https://www.kernel.org/doc/Documentation/scheduler/sched- bwc.txt) to enforce container CPU limits. - Otherwise, CPU limits will not be enforced at - all. - - Disable this option to mitigate CPU throttling - problems while still keeping your pods in the - Guaranteed QoS class by specifying the CPU - limits. - The default value is 'true' if unspecified. - cpu_cfs_quota_period (str): - Set the CPU CFS quota period value 'cpu.cfs_period_us'. - - The string must be a sequence of decimal numbers, each with - optional fraction and a unit suffix, such as "300ms". Valid - time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". - The value must be a positive duration. - """ - - cpu_manager_policy = proto.Field( - proto.STRING, - number=1, - ) - cpu_cfs_quota = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.BoolValue, - ) - cpu_cfs_quota_period = proto.Field( - proto.STRING, - number=3, - ) - - -class NodeConfig(proto.Message): - r"""Parameters that describe the nodes in a cluster. - Attributes: - machine_type (str): - The name of a Google Compute Engine `machine - type `__. - - If unspecified, the default machine type is ``e2-medium``. - disk_size_gb (int): - Size of the disk attached to each node, - specified in GB. The smallest allowed disk size - is 10GB. - If unspecified, the default disk size is 100GB. - oauth_scopes (Sequence[str]): - The set of Google API scopes to be made available on all of - the node VMs under the "default" service account. - - The following scopes are recommended, but not required, and - by default are not included: - - - ``https://www.googleapis.com/auth/compute`` is required - for mounting persistent storage on your nodes. - - ``https://www.googleapis.com/auth/devstorage.read_only`` - is required for communicating with **gcr.io** (the - `Google Container - Registry `__). - - If unspecified, no scopes are added, unless Cloud Logging or - Cloud Monitoring are enabled, in which case their required - scopes will be added.
- service_account (str): - The Google Cloud Platform Service Account to - be used by the node VMs. Specify the email - address of the Service Account; otherwise, if no - Service Account is specified, the "default" - service account is used. - metadata (Sequence[google.container_v1beta1.types.NodeConfig.MetadataEntry]): - The metadata key/value pairs assigned to instances in the - cluster. - - Keys must conform to the regexp ``[a-zA-Z0-9-_]+`` and be - less than 128 bytes in length. These are reflected as part - of a URL in the metadata server. Additionally, to avoid - ambiguity, keys must not conflict with any other metadata - keys for the project or be one of the reserved keys: - - - "cluster-location" - - "cluster-name" - - "cluster-uid" - - "configure-sh" - - "containerd-configure-sh" - - "enable-oslogin" - - "gci-ensure-gke-docker" - - "gci-metrics-enabled" - - "gci-update-strategy" - - "instance-template" - - "kube-env" - - "startup-script" - - "user-data" - - "disable-address-manager" - - "windows-startup-script-ps1" - - "common-psm1" - - "k8s-node-setup-psm1" - - "install-ssh-psm1" - - "user-profile-psm1" - - The following keys are reserved for Windows nodes: - - - "serial-port-logging-enable" - - Values are free-form strings, and only have meaning as - interpreted by the image running in the instance. The only - restriction placed on them is that each value's size must be - less than or equal to 32 KB. - - The total size of all keys and values must be less than 512 - KB. - image_type (str): - The image type to use for this node. Note - that for a given image type, the latest version - of it will be used. - labels (Sequence[google.container_v1beta1.types.NodeConfig.LabelsEntry]): - The map of Kubernetes labels (key/value - pairs) to be applied to each node. These will be - added in addition to any default label(s) that - Kubernetes may apply to the node. - In case of conflict in label keys, the applied - set may differ depending on the Kubernetes - version -- it's best to assume the behavior is - undefined and conflicts should be avoided. - For more information, including usage and the - valid values, see: - https://kubernetes.io/docs/concepts/overview/working- with-objects/labels/ - local_ssd_count (int): - The number of local SSD disks to be attached - to the node. - The limit for this value is dependent upon the - maximum number of disks available on a machine - per zone. See: - https://cloud.google.com/compute/docs/disks/local- ssd for more information. - tags (Sequence[str]): - The list of instance tags applied to all - nodes. Tags are used to identify valid sources - or targets for network firewalls and are - specified by the client during cluster or node - pool creation. Each tag within the list must - comply with RFC1035. - preemptible (bool): - Whether the nodes are created as preemptible - VM instances. See: - https://cloud.google.com/compute/docs/instances/preemptible - for more information about preemptible VM - instances. - accelerators (Sequence[google.container_v1beta1.types.AcceleratorConfig]): - A list of hardware accelerators to be - attached to each node. See - https://cloud.google.com/compute/docs/gpus for - more information about support for GPUs. - sandbox_config (google.container_v1beta1.types.SandboxConfig): - Sandbox configuration for this node. - node_group (str): - Setting this field will assign instances of this pool to run - on the specified node group. This is useful for running - workloads on `sole tenant - nodes `__.
- reservation_affinity (google.container_v1beta1.types.ReservationAffinity): - The optional reservation affinity. Setting this field will - apply the specified `Zonal Compute - Reservation `__ - to this node pool. - disk_type (str): - Type of the disk attached to each node (e.g. - 'pd-standard', 'pd-ssd' or 'pd-balanced') - - If unspecified, the default disk type is 'pd- - standard' - min_cpu_platform (str): - Minimum CPU platform to be used by this instance. The - instance may be scheduled on the specified or newer CPU - platform. Applicable values are the friendly names of CPU - platforms, such as ``minCpuPlatform: "Intel Haswell"`` or - ``minCpuPlatform: "Intel Sandy Bridge"``. For more - information, read `how to specify min CPU - platform `__ - workload_metadata_config (google.container_v1beta1.types.WorkloadMetadataConfig): - The workload metadata configuration for this - node. - taints (Sequence[google.container_v1beta1.types.NodeTaint]): - List of kubernetes taints to be applied to - each node. - For more information, including usage and the - valid values, see: - https://kubernetes.io/docs/concepts/configuration/taint- - and-toleration/ - boot_disk_kms_key (str): - The Customer Managed Encryption Key used to encrypt the boot - disk attached to each node in the node pool. This should be - of the form - projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. - For more information about protecting resources with Cloud - KMS Keys please see: - https://cloud.google.com/compute/docs/disks/customer-managed-encryption - shielded_instance_config (google.container_v1beta1.types.ShieldedInstanceConfig): - Shielded Instance options. - linux_node_config (google.container_v1beta1.types.LinuxNodeConfig): - Parameters that can be configured on Linux - nodes. - kubelet_config (google.container_v1beta1.types.NodeKubeletConfig): - Node kubelet configs. - ephemeral_storage_config (google.container_v1beta1.types.EphemeralStorageConfig): - Parameters for the ephemeral storage - filesystem. If unspecified, ephemeral storage is - backed by the boot disk. 
- """ - - machine_type = proto.Field( - proto.STRING, - number=1, - ) - disk_size_gb = proto.Field( - proto.INT32, - number=2, - ) - oauth_scopes = proto.RepeatedField( - proto.STRING, - number=3, - ) - service_account = proto.Field( - proto.STRING, - number=9, - ) - metadata = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - image_type = proto.Field( - proto.STRING, - number=5, - ) - labels = proto.MapField( - proto.STRING, - proto.STRING, - number=6, - ) - local_ssd_count = proto.Field( - proto.INT32, - number=7, - ) - tags = proto.RepeatedField( - proto.STRING, - number=8, - ) - preemptible = proto.Field( - proto.BOOL, - number=10, - ) - accelerators = proto.RepeatedField( - proto.MESSAGE, - number=11, - message='AcceleratorConfig', - ) - sandbox_config = proto.Field( - proto.MESSAGE, - number=17, - message='SandboxConfig', - ) - node_group = proto.Field( - proto.STRING, - number=18, - ) - reservation_affinity = proto.Field( - proto.MESSAGE, - number=19, - message='ReservationAffinity', - ) - disk_type = proto.Field( - proto.STRING, - number=12, - ) - min_cpu_platform = proto.Field( - proto.STRING, - number=13, - ) - workload_metadata_config = proto.Field( - proto.MESSAGE, - number=14, - message='WorkloadMetadataConfig', - ) - taints = proto.RepeatedField( - proto.MESSAGE, - number=15, - message='NodeTaint', - ) - boot_disk_kms_key = proto.Field( - proto.STRING, - number=23, - ) - shielded_instance_config = proto.Field( - proto.MESSAGE, - number=20, - message='ShieldedInstanceConfig', - ) - linux_node_config = proto.Field( - proto.MESSAGE, - number=21, - message='LinuxNodeConfig', - ) - kubelet_config = proto.Field( - proto.MESSAGE, - number=22, - message='NodeKubeletConfig', - ) - ephemeral_storage_config = proto.Field( - proto.MESSAGE, - number=24, - message='EphemeralStorageConfig', - ) - - -class ShieldedInstanceConfig(proto.Message): - r"""A set of Shielded Instance options. - Attributes: - enable_secure_boot (bool): - Defines whether the instance has Secure Boot - enabled. - Secure Boot helps ensure that the system only - runs authentic software by verifying the digital - signature of all boot components, and halting - the boot process if signature verification - fails. - enable_integrity_monitoring (bool): - Defines whether the instance has integrity - monitoring enabled. - Enables monitoring and attestation of the boot - integrity of the instance. The attestation is - performed against the integrity policy baseline. - This baseline is initially derived from the - implicitly trusted boot image when the instance - is created. - """ - - enable_secure_boot = proto.Field( - proto.BOOL, - number=1, - ) - enable_integrity_monitoring = proto.Field( - proto.BOOL, - number=2, - ) - - -class SandboxConfig(proto.Message): - r"""SandboxConfig contains configurations of the sandbox to use - for the node. - - Attributes: - sandbox_type (str): - Type of the sandbox to use for the node (e.g. - 'gvisor') - type_ (google.container_v1beta1.types.SandboxConfig.Type): - Type of the sandbox to use for the node. - """ - class Type(proto.Enum): - r"""Possible types of sandboxes.""" - UNSPECIFIED = 0 - GVISOR = 1 - - sandbox_type = proto.Field( - proto.STRING, - number=1, - ) - type_ = proto.Field( - proto.ENUM, - number=2, - enum=Type, - ) - - -class EphemeralStorageConfig(proto.Message): - r"""EphemeralStorageConfig contains configuration for the - ephemeral storage filesystem. - - Attributes: - local_ssd_count (int): - Number of local SSDs to use to back ephemeral - storage. 
Uses NVMe interfaces. Each local SSD is - 375 GB in size. If zero, it means to disable - using local SSDs as ephemeral storage. - """ - - local_ssd_count = proto.Field( - proto.INT32, - number=1, - ) - - -class ReservationAffinity(proto.Message): - r"""`ReservationAffinity `__ - is the configuration of the desired reservation from which - instances can consume capacity. - - Attributes: - consume_reservation_type (google.container_v1beta1.types.ReservationAffinity.Type): - Corresponds to the type of reservation - consumption. - key (str): - Corresponds to the label key of a reservation resource. To - target a SPECIFIC_RESERVATION by name, specify - "googleapis.com/reservation-name" as the key and specify the - name of your reservation as its value. - values (Sequence[str]): - Corresponds to the label value(s) of - reservation resource(s). - """ - class Type(proto.Enum): - r"""Indicates whether to consume capacity from a reservation or - not. - """ - UNSPECIFIED = 0 - NO_RESERVATION = 1 - ANY_RESERVATION = 2 - SPECIFIC_RESERVATION = 3 - - consume_reservation_type = proto.Field( - proto.ENUM, - number=1, - enum=Type, - ) - key = proto.Field( - proto.STRING, - number=2, - ) - values = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class NodeTaint(proto.Message): - r"""A Kubernetes taint is composed of three fields: key, value, and - effect. Effect can only be one of three types: NoSchedule, - PreferNoSchedule or NoExecute. - - See - `here `__ - for more information, including usage and the valid values. - - Attributes: - key (str): - Key for taint. - value (str): - Value for taint. - effect (google.container_v1beta1.types.NodeTaint.Effect): - Effect for taint. - """ - class Effect(proto.Enum): - r"""Possible values for Effect in taint.""" - EFFECT_UNSPECIFIED = 0 - NO_SCHEDULE = 1 - PREFER_NO_SCHEDULE = 2 - NO_EXECUTE = 3 - - key = proto.Field( - proto.STRING, - number=1, - ) - value = proto.Field( - proto.STRING, - number=2, - ) - effect = proto.Field( - proto.ENUM, - number=3, - enum=Effect, - ) - - -class MasterAuth(proto.Message): - r"""The authentication information for accessing the master - endpoint. Authentication can be done using HTTP basic auth or - using client certificates. - - Attributes: - username (str): - The username to use for HTTP basic - authentication to the master endpoint. For - clusters v1.6.0 and later, basic authentication - can be disabled by leaving username unspecified - (or setting it to the empty string). - Warning: basic authentication is deprecated, and - will be removed in GKE control plane versions - 1.19 and newer. For a list of recommended - authentication methods, see: - https://cloud.google.com/kubernetes- engine/docs/how-to/api-server-authentication - password (str): - The password to use for HTTP basic - authentication to the master endpoint. Because - the master endpoint is open to the Internet, you - should create a strong password. If a password - is provided for cluster creation, username must - be non-empty. - - Warning: basic authentication is deprecated, and - will be removed in GKE control plane versions - 1.19 and newer. For a list of recommended - authentication methods, see: - https://cloud.google.com/kubernetes- engine/docs/how-to/api-server-authentication - client_certificate_config (google.container_v1beta1.types.ClientCertificateConfig): - Configuration for client certificate - authentication on the cluster. For clusters - before v1.12, if no configuration is specified, - a client certificate is issued.
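The ``ReservationAffinity`` and ``NodeTaint`` messages defined just above pair scalar fields with nested enums. A brief hedged sketch of constructing them; apart from the documented ``googleapis.com/reservation-name`` key, all values are made up::

    from google.container_v1beta1 import types

    # Nested enums hang off the message class; proto-plus also accepts
    # the member's name or integer value in place of the member itself.
    taint = types.NodeTaint(
        key='dedicated',   # hypothetical taint key
        value='gpu',       # hypothetical taint value
        effect=types.NodeTaint.Effect.NO_SCHEDULE,
    )

    # Targeting one reservation by name uses the documented reserved key.
    affinity = types.ReservationAffinity(
        consume_reservation_type=types.ReservationAffinity.Type.SPECIFIC_RESERVATION,
        key='googleapis.com/reservation-name',
        values=['my-reservation'],  # hypothetical reservation name
    )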
- cluster_ca_certificate (str): - [Output only] Base64-encoded public certificate that is the - root of trust for the cluster. - client_certificate (str): - [Output only] Base64-encoded public certificate used by - clients to authenticate to the cluster endpoint. - client_key (str): - [Output only] Base64-encoded private key used by clients to - authenticate to the cluster endpoint. - """ - - username = proto.Field( - proto.STRING, - number=1, - ) - password = proto.Field( - proto.STRING, - number=2, - ) - client_certificate_config = proto.Field( - proto.MESSAGE, - number=3, - message='ClientCertificateConfig', - ) - cluster_ca_certificate = proto.Field( - proto.STRING, - number=100, - ) - client_certificate = proto.Field( - proto.STRING, - number=101, - ) - client_key = proto.Field( - proto.STRING, - number=102, - ) - - -class ClientCertificateConfig(proto.Message): - r"""Configuration for client certificates on the cluster. - Attributes: - issue_client_certificate (bool): - Issue a client certificate. - """ - - issue_client_certificate = proto.Field( - proto.BOOL, - number=1, - ) - - -class AddonsConfig(proto.Message): - r"""Configuration for the addons that can be automatically spun - up in the cluster, enabling additional functionality. - - Attributes: - http_load_balancing (google.container_v1beta1.types.HttpLoadBalancing): - Configuration for the HTTP (L7) load - balancing controller addon, which makes it easy - to set up HTTP load balancers for services in a - cluster. - horizontal_pod_autoscaling (google.container_v1beta1.types.HorizontalPodAutoscaling): - Configuration for the horizontal pod - autoscaling feature, which increases or - decreases the number of replica pods a - replication controller has based on the resource - usage of the existing pods. - kubernetes_dashboard (google.container_v1beta1.types.KubernetesDashboard): - Configuration for the Kubernetes Dashboard. - This addon is deprecated, and will be disabled - in 1.15. It is recommended to use the Cloud - Console to manage and monitor your Kubernetes - clusters, workloads and applications. For more - information, see: - https://cloud.google.com/kubernetes- engine/docs/concepts/dashboards - network_policy_config (google.container_v1beta1.types.NetworkPolicyConfig): - Configuration for NetworkPolicy. This only - tracks whether the addon is enabled or not on - the Master, it does not track whether network - policy is enabled for the nodes. - istio_config (google.container_v1beta1.types.IstioConfig): - Configuration for Istio, an open platform to - connect, manage, and secure microservices. - cloud_run_config (google.container_v1beta1.types.CloudRunConfig): - Configuration for the Cloud Run addon. The ``IstioConfig`` - addon must be enabled in order to enable Cloud Run addon. - This option can only be enabled at cluster creation time. - dns_cache_config (google.container_v1beta1.types.DnsCacheConfig): - Configuration for NodeLocalDNS, a DNS cache - running on cluster nodes - config_connector_config (google.container_v1beta1.types.ConfigConnectorConfig): - Configuration for the ConfigConnector add-on, - a Kubernetes extension to manage hosted GCP - services through the Kubernetes API - gce_persistent_disk_csi_driver_config (google.container_v1beta1.types.GcePersistentDiskCsiDriverConfig): - Configuration for the Compute Engine - Persistent Disk CSI driver. - kalm_config (google.container_v1beta1.types.KalmConfig): - Configuration for the KALM addon, which - manages the lifecycle of k8s applications.
- """ - - http_load_balancing = proto.Field( - proto.MESSAGE, - number=1, - message='HttpLoadBalancing', - ) - horizontal_pod_autoscaling = proto.Field( - proto.MESSAGE, - number=2, - message='HorizontalPodAutoscaling', - ) - kubernetes_dashboard = proto.Field( - proto.MESSAGE, - number=3, - message='KubernetesDashboard', - ) - network_policy_config = proto.Field( - proto.MESSAGE, - number=4, - message='NetworkPolicyConfig', - ) - istio_config = proto.Field( - proto.MESSAGE, - number=5, - message='IstioConfig', - ) - cloud_run_config = proto.Field( - proto.MESSAGE, - number=7, - message='CloudRunConfig', - ) - dns_cache_config = proto.Field( - proto.MESSAGE, - number=8, - message='DnsCacheConfig', - ) - config_connector_config = proto.Field( - proto.MESSAGE, - number=10, - message='ConfigConnectorConfig', - ) - gce_persistent_disk_csi_driver_config = proto.Field( - proto.MESSAGE, - number=11, - message='GcePersistentDiskCsiDriverConfig', - ) - kalm_config = proto.Field( - proto.MESSAGE, - number=12, - message='KalmConfig', - ) - - -class HttpLoadBalancing(proto.Message): - r"""Configuration options for the HTTP (L7) load balancing - controller addon, which makes it easy to set up HTTP load - balancers for services in a cluster. - - Attributes: - disabled (bool): - Whether the HTTP Load Balancing controller is - enabled in the cluster. When enabled, it runs a - small pod in the cluster that manages the load - balancers. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class HorizontalPodAutoscaling(proto.Message): - r"""Configuration options for the horizontal pod autoscaling - feature, which increases or decreases the number of replica pods - a replication controller has based on the resource usage of the - existing pods. - - Attributes: - disabled (bool): - Whether the Horizontal Pod Autoscaling - feature is enabled in the cluster. When enabled, - it ensures that metrics are collected into - Stackdriver Monitoring. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class KubernetesDashboard(proto.Message): - r"""Configuration for the Kubernetes Dashboard. - Attributes: - disabled (bool): - Whether the Kubernetes Dashboard is enabled - for this cluster. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class NetworkPolicyConfig(proto.Message): - r"""Configuration for NetworkPolicy. This only tracks whether the - addon is enabled or not on the Master, it does not track whether - network policy is enabled for the nodes. - - Attributes: - disabled (bool): - Whether NetworkPolicy is enabled for this - cluster. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class DnsCacheConfig(proto.Message): - r"""Configuration for NodeLocal DNSCache - Attributes: - enabled (bool): - Whether NodeLocal DNSCache is enabled for - this cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class KalmConfig(proto.Message): - r"""Configuration options for the KALM addon. - Attributes: - enabled (bool): - Whether KALM is enabled for this cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class ConfigConnectorConfig(proto.Message): - r"""Configuration options for the Config Connector add-on. - Attributes: - enabled (bool): - Whether Cloud Connector is enabled for this - cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class GcePersistentDiskCsiDriverConfig(proto.Message): - r"""Configuration for the Compute Engine PD CSI driver. 
This - option can only be enabled at cluster creation time. - - Attributes: - enabled (bool): - Whether the Compute Engine PD CSI driver is - enabled for this cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class PrivateClusterMasterGlobalAccessConfig(proto.Message): - r"""Configuration for controlling master global access settings. - Attributes: - enabled (bool): - Whether the master is accessible globally or - not. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class PrivateClusterConfig(proto.Message): - r"""Configuration options for private clusters. - Attributes: - enable_private_nodes (bool): - Whether nodes have internal IP addresses - only. If enabled, all nodes are given only RFC - 1918 private addresses and communicate with the - master via private networking. - enable_private_endpoint (bool): - Whether the master's internal IP address is - used as the cluster endpoint. - master_ipv4_cidr_block (str): - The IP range in CIDR notation to use for the - hosted master network. This range will be used - for assigning internal IP addresses to the - master or set of masters, as well as the ILB - VIP. This range must not overlap with any other - ranges in use within the cluster's network. - private_endpoint (str): - Output only. The internal IP address of this - cluster's master endpoint. - public_endpoint (str): - Output only. The external IP address of this - cluster's master endpoint. - peering_name (str): - Output only. The peering name in the customer - VPC used by this cluster. - master_global_access_config (google.container_v1beta1.types.PrivateClusterMasterGlobalAccessConfig): - Controls master global access settings. - """ - - enable_private_nodes = proto.Field( - proto.BOOL, - number=1, - ) - enable_private_endpoint = proto.Field( - proto.BOOL, - number=2, - ) - master_ipv4_cidr_block = proto.Field( - proto.STRING, - number=3, - ) - private_endpoint = proto.Field( - proto.STRING, - number=4, - ) - public_endpoint = proto.Field( - proto.STRING, - number=5, - ) - peering_name = proto.Field( - proto.STRING, - number=7, - ) - master_global_access_config = proto.Field( - proto.MESSAGE, - number=8, - message='PrivateClusterMasterGlobalAccessConfig', - ) - - -class IstioConfig(proto.Message): - r"""Configuration options for Istio addon. - Attributes: - disabled (bool): - Whether Istio is enabled for this cluster. - auth (google.container_v1beta1.types.IstioConfig.IstioAuthMode): - The specified Istio auth mode, either none, - or mutual TLS. - """ - class IstioAuthMode(proto.Enum): - r"""Istio auth mode, - https://istio.io/docs/concepts/security/mutual-tls.html - """ - AUTH_NONE = 0 - AUTH_MUTUAL_TLS = 1 - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - auth = proto.Field( - proto.ENUM, - number=2, - enum=IstioAuthMode, - ) - - -class CloudRunConfig(proto.Message): - r"""Configuration options for the Cloud Run feature. - Attributes: - disabled (bool): - Whether Cloud Run addon is enabled for this - cluster. - load_balancer_type (google.container_v1beta1.types.CloudRunConfig.LoadBalancerType): - Which load balancer type is installed for - Cloud Run.
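Note the naming asymmetry running through the addon messages above: the older addons (``HttpLoadBalancing``, ``HorizontalPodAutoscaling``, ``KubernetesDashboard``, ``NetworkPolicyConfig``, ``IstioConfig``, ``CloudRunConfig``) carry a ``disabled`` flag, while the newer ones (``DnsCacheConfig``, ``KalmConfig``, ``ConfigConnectorConfig``, ``GcePersistentDiskCsiDriverConfig``) carry ``enabled``. A hedged sketch of composing an ``AddonsConfig``, with illustrative toggle choices::

    from google.container_v1beta1 import types

    addons = types.AddonsConfig(
        # "disabled" semantics: False leaves the addon running.
        http_load_balancing=types.HttpLoadBalancing(disabled=False),
        kubernetes_dashboard=types.KubernetesDashboard(disabled=True),
        # "enabled" semantics: True turns the addon on.
        dns_cache_config=types.DnsCacheConfig(enabled=True),
        gce_persistent_disk_csi_driver_config=(
            types.GcePersistentDiskCsiDriverConfig(enabled=True)
        ),
    )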
- """ - class LoadBalancerType(proto.Enum): - r"""Load balancer type of ingress service of Cloud Run.""" - LOAD_BALANCER_TYPE_UNSPECIFIED = 0 - LOAD_BALANCER_TYPE_EXTERNAL = 1 - LOAD_BALANCER_TYPE_INTERNAL = 2 - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - load_balancer_type = proto.Field( - proto.ENUM, - number=3, - enum=LoadBalancerType, - ) - - -class MasterAuthorizedNetworksConfig(proto.Message): - r"""Configuration options for the master authorized networks - feature. Enabled master authorized networks will disallow all - external traffic to access Kubernetes master through HTTPS - except traffic from the given CIDR blocks, Google Compute Engine - Public IPs and Google Prod IPs. - - Attributes: - enabled (bool): - Whether or not master authorized networks is - enabled. - cidr_blocks (Sequence[google.container_v1beta1.types.MasterAuthorizedNetworksConfig.CidrBlock]): - cidr_blocks define up to 10 external networks that could - access Kubernetes master through HTTPS. - """ - - class CidrBlock(proto.Message): - r"""CidrBlock contains an optional name and one CIDR block. - Attributes: - display_name (str): - display_name is an optional field for users to identify CIDR - blocks. - cidr_block (str): - cidr_block must be specified in CIDR notation. - """ - - display_name = proto.Field( - proto.STRING, - number=1, - ) - cidr_block = proto.Field( - proto.STRING, - number=2, - ) - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - cidr_blocks = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=CidrBlock, - ) - - -class LegacyAbac(proto.Message): - r"""Configuration for the legacy Attribute Based Access Control - authorization mode. - - Attributes: - enabled (bool): - Whether the ABAC authorizer is enabled for - this cluster. When enabled, identities in the - system, including service accounts, nodes, and - controllers, will have statically granted - permissions beyond those provided by the RBAC - configuration or IAM. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class NetworkPolicy(proto.Message): - r"""Configuration options for the NetworkPolicy feature. - https://kubernetes.io/docs/concepts/services- - networking/networkpolicies/ - - Attributes: - provider (google.container_v1beta1.types.NetworkPolicy.Provider): - The selected network policy provider. - enabled (bool): - Whether network policy is enabled on the - cluster. - """ - class Provider(proto.Enum): - r"""Allowed Network Policy providers.""" - PROVIDER_UNSPECIFIED = 0 - CALICO = 1 - - provider = proto.Field( - proto.ENUM, - number=1, - enum=Provider, - ) - enabled = proto.Field( - proto.BOOL, - number=2, - ) - - -class IPAllocationPolicy(proto.Message): - r"""Configuration for controlling how IPs are allocated in the - cluster. - - Attributes: - use_ip_aliases (bool): - Whether alias IPs will be used for pod IPs in the cluster. - This is used in conjunction with use_routes. It cannot be - true if use_routes is true. If both use_ip_aliases and - use_routes are false, then the server picks the default IP - allocation mode - create_subnetwork (bool): - Whether a new subnetwork will be created automatically for - the cluster. - - This field is only applicable when ``use_ip_aliases`` is - true. - subnetwork_name (str): - A custom subnetwork name to be used if ``create_subnetwork`` - is true. If this field is empty, then an automatic name will - be chosen for the new subnetwork. - cluster_ipv4_cidr (str): - This field is deprecated, use cluster_ipv4_cidr_block. 
- node_ipv4_cidr (str): - This field is deprecated, use node_ipv4_cidr_block. - services_ipv4_cidr (str): - This field is deprecated, use services_ipv4_cidr_block. - cluster_secondary_range_name (str): - The name of the secondary range to be used for the cluster - CIDR block. The secondary range will be used for pod IP - addresses. This must be an existing secondary range - associated with the cluster subnetwork. - - This field is only applicable with use_ip_aliases and - create_subnetwork is false. - services_secondary_range_name (str): - The name of the secondary range to be used for the - services CIDR block. The secondary range will be used for - service ClusterIPs. This must be an existing secondary range - associated with the cluster subnetwork. - - This field is only applicable with use_ip_aliases and - create_subnetwork is false. - cluster_ipv4_cidr_block (str): - The IP address range for the cluster pod IPs. If this field - is set, then ``cluster.cluster_ipv4_cidr`` must be left - blank. - - This field is only applicable when ``use_ip_aliases`` is - true. - - Set to blank to have a range chosen with the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - node_ipv4_cidr_block (str): - The IP address range of the instance IPs in this cluster. - - This is applicable only if ``create_subnetwork`` is true. - - Set to blank to have a range chosen with the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - services_ipv4_cidr_block (str): - The IP address range of the services IPs in this cluster. If - blank, a range will be automatically chosen with the default - size. - - This field is only applicable when ``use_ip_aliases`` is - true. - - Set to blank to have a range chosen with the default size. - - Set to /netmask (e.g. ``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. - allow_route_overlap (bool): - If true, allow allocation of cluster CIDR ranges that - overlap with certain kinds of network routes. By default we - do not allow cluster CIDR ranges to intersect with any user - declared routes. With allow_route_overlap == true, we allow - overlapping with CIDR ranges that are larger than the - cluster CIDR range. - - If this field is set to true, then cluster and services - CIDRs must be fully-specified (e.g. ``10.96.0.0/14``, but - not ``/14``), which means: - - 1) When ``use_ip_aliases`` is true, - ``cluster_ipv4_cidr_block`` and - ``services_ipv4_cidr_block`` must be fully-specified. - 2) When ``use_ip_aliases`` is false, - ``cluster.cluster_ipv4_cidr`` must be fully-specified. - tpu_ipv4_cidr_block (str): - The IP address range of the Cloud TPUs in this cluster. If - unspecified, a range will be automatically chosen with the - default size. - - This field is only applicable when ``use_ip_aliases`` is - true. - - If unspecified, the range will use the default size. - - Set to /netmask (e.g.
``/14``) to have a range chosen with a - specific netmask. - - Set to a - `CIDR `__ - notation (e.g. ``10.96.0.0/14``) from the RFC-1918 private - networks (e.g. ``10.0.0.0/8``, ``172.16.0.0/12``, - ``192.168.0.0/16``) to pick a specific range to use. This - field is deprecated, use cluster.tpu_config.ipv4_cidr_block - instead. - use_routes (bool): - Whether routes will be used for pod IPs in the cluster. This - is used in conjunction with use_ip_aliases. It cannot be - true if use_ip_aliases is true. If both use_ip_aliases and - use_routes are false, then the server picks the default IP - allocation mode - """ - - use_ip_aliases = proto.Field( - proto.BOOL, - number=1, - ) - create_subnetwork = proto.Field( - proto.BOOL, - number=2, - ) - subnetwork_name = proto.Field( - proto.STRING, - number=3, - ) - cluster_ipv4_cidr = proto.Field( - proto.STRING, - number=4, - ) - node_ipv4_cidr = proto.Field( - proto.STRING, - number=5, - ) - services_ipv4_cidr = proto.Field( - proto.STRING, - number=6, - ) - cluster_secondary_range_name = proto.Field( - proto.STRING, - number=7, - ) - services_secondary_range_name = proto.Field( - proto.STRING, - number=8, - ) - cluster_ipv4_cidr_block = proto.Field( - proto.STRING, - number=9, - ) - node_ipv4_cidr_block = proto.Field( - proto.STRING, - number=10, - ) - services_ipv4_cidr_block = proto.Field( - proto.STRING, - number=11, - ) - allow_route_overlap = proto.Field( - proto.BOOL, - number=12, - ) - tpu_ipv4_cidr_block = proto.Field( - proto.STRING, - number=13, - ) - use_routes = proto.Field( - proto.BOOL, - number=15, - ) - - -class BinaryAuthorization(proto.Message): - r"""Configuration for Binary Authorization. - Attributes: - enabled (bool): - Enable Binary Authorization for this cluster. - If enabled, all container images will be - validated by Google Binauthz. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class PodSecurityPolicyConfig(proto.Message): - r"""Configuration for the PodSecurityPolicy feature. - Attributes: - enabled (bool): - Enable the PodSecurityPolicy controller for - this cluster. If enabled, pods must be valid - under a PodSecurityPolicy to be created. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class AuthenticatorGroupsConfig(proto.Message): - r"""Configuration for returning group information from - authenticators. - - Attributes: - enabled (bool): - Whether this cluster should return group - membership lookups during authentication using a - group of security groups. - security_group (str): - The name of the security group-of-groups to - be used. Only relevant if enabled = true. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - security_group = proto.Field( - proto.STRING, - number=2, - ) - - -class ClusterTelemetry(proto.Message): - r"""Telemetry integration for the cluster. - Attributes: - type_ (google.container_v1beta1.types.ClusterTelemetry.Type): - Type of the integration. - """ - class Type(proto.Enum): - r"""Type of the integration.""" - UNSPECIFIED = 0 - DISABLED = 1 - ENABLED = 2 - SYSTEM_ONLY = 3 - - type_ = proto.Field( - proto.ENUM, - number=1, - enum=Type, - ) - - -class Cluster(proto.Message): - r"""A Google Kubernetes Engine cluster. - Attributes: - name (str): - The name of this cluster. The name must be unique within - this project and location (e.g. zone or region), and can be - up to 40 characters with the following restrictions: - - - Lowercase letters, numbers, and hyphens only. - - Must start with a letter. - - Must end with a number or a letter. 
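The ``IPAllocationPolicy`` fields above encode the VPC-native versus routes-based choice: ``use_ip_aliases`` and ``use_routes`` are mutually exclusive, and the secondary-range names only apply when ``use_ip_aliases`` is true and ``create_subnetwork`` is false. A hedged sketch of a VPC-native policy reusing pre-existing secondary ranges (the range names are hypothetical)::

    from google.container_v1beta1 import types

    policy = types.IPAllocationPolicy(
        use_ip_aliases=True,       # mutually exclusive with use_routes
        create_subnetwork=False,   # reuse the existing cluster subnetwork
        # Both ranges must already exist on the cluster subnetwork.
        cluster_secondary_range_name='pods-range',        # hypothetical
        services_secondary_range_name='services-range',   # hypothetical
    )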
- description (str): - An optional description of this cluster. - initial_node_count (int): - The number of nodes to create in this cluster. You must - ensure that your Compute Engine `resource - quota `__ is - sufficient for this number of instances. You must also have - available firewall and routes quota. For requests, this - field should only be used in lieu of a "node_pool" object, - since this configuration (along with the "node_config") will - be used to create a "NodePool" object with an auto-generated - name. Do not use this and a node_pool at the same time. - - This field is deprecated, use node_pool.initial_node_count - instead. - node_config (google.container_v1beta1.types.NodeConfig): - Parameters used in creating the cluster's nodes. For - requests, this field should only be used in lieu of a - "node_pool" object, since this configuration (along with the - "initial_node_count") will be used to create a "NodePool" - object with an auto-generated name. Do not use this and a - node_pool at the same time. For responses, this field will - be populated with the node configuration of the first node - pool. (For configuration of each node pool, see - ``node_pool.config``) - - If unspecified, the defaults are used. This field is - deprecated, use node_pool.config instead. - master_auth (google.container_v1beta1.types.MasterAuth): - The authentication information for accessing the master - endpoint. If unspecified, the defaults are used: For - clusters before v1.12, if master_auth is unspecified, - ``username`` will be set to "admin", a random password will - be generated, and a client certificate will be issued. - logging_service (str): - The logging service the cluster should use to write logs. - Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud Logging - service with a Kubernetes-native resource model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be used - for GKE 1.14+ or ``logging.googleapis.com`` for earlier - versions. - monitoring_service (str): - The monitoring service the cluster should use to write - metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE 1.15). - - ``none`` - No metrics will be exported from the cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``monitoring.googleapis.com`` for - earlier versions. - network (str): - The name of the Google Compute Engine - `network `__ - to which the cluster is connected. If left unspecified, the - ``default`` network will be used. On output this shows the - network ID instead of the name. - cluster_ipv4_cidr (str): - The IP address range of the container pods in this cluster, - in - `CIDR `__ - notation (e.g. ``10.96.0.0/14``). Leave blank to have one - automatically chosen or specify a ``/14`` block in - ``10.0.0.0/8``. - addons_config (google.container_v1beta1.types.AddonsConfig): - Configurations for the various addons - available to run in the cluster. - subnetwork (str): - The name of the Google Compute Engine - `subnetwork `__ - to which the cluster is connected. 
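Since `initial_node_count` and `node_config` are deprecated in favor of `node_pool`, a new `Cluster` is normally described with an explicit `NodePool`. A short sketch under the same import assumption as above; the names and node count are illustrative:

```python
from google.container_v1beta1 import types

cluster = types.Cluster(
    name="example-cluster",  # unique per project/location, <= 40 chars
    # Preferred over the deprecated initial_node_count/node_config pair:
    node_pools=[types.NodePool(name="default-pool", initial_node_count=3)],
    logging_service="logging.googleapis.com/kubernetes",
    monitoring_service="monitoring.googleapis.com/kubernetes",
)
```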
On output this shows the - subnetwork ID instead of the name. - node_pools (Sequence[google.container_v1beta1.types.NodePool]): - The node pools associated with this cluster. This field - should not be set if "node_config" or "initial_node_count" - are specified. - locations (Sequence[str]): - The list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. - - This field provides a default value if - `NodePool.Locations `__ - are not specified during node pool creation. - - Warning: changing cluster locations will update the - `NodePool.Locations `__ - of all node pools and will result in nodes being added - and/or removed. - enable_kubernetes_alpha (bool): - Kubernetes alpha features are enabled on this - cluster. This includes alpha API groups (e.g. - v1beta1) and features that may not be production - ready in the kubernetes version of the master - and nodes. The cluster has no SLA for uptime and - master/node upgrades are disabled. Alpha enabled - clusters are automatically deleted thirty days - after creation. - resource_labels (Sequence[google.container_v1beta1.types.Cluster.ResourceLabelsEntry]): - The resource labels for the cluster to use to - annotate any related Google Compute Engine - resources. - label_fingerprint (str): - The fingerprint of the set of labels for this - cluster. - legacy_abac (google.container_v1beta1.types.LegacyAbac): - Configuration for the legacy ABAC - authorization mode. - network_policy (google.container_v1beta1.types.NetworkPolicy): - Configuration options for the NetworkPolicy - feature. - ip_allocation_policy (google.container_v1beta1.types.IPAllocationPolicy): - Configuration for cluster IP allocation. - master_authorized_networks_config (google.container_v1beta1.types.MasterAuthorizedNetworksConfig): - The configuration options for master - authorized networks feature. - maintenance_policy (google.container_v1beta1.types.MaintenancePolicy): - Configure the maintenance policy for this - cluster. - binary_authorization (google.container_v1beta1.types.BinaryAuthorization): - Configuration for Binary Authorization. - pod_security_policy_config (google.container_v1beta1.types.PodSecurityPolicyConfig): - Configuration for the PodSecurityPolicy - feature. - autoscaling (google.container_v1beta1.types.ClusterAutoscaling): - Cluster-level autoscaling configuration. - network_config (google.container_v1beta1.types.NetworkConfig): - Configuration for cluster networking. - private_cluster (bool): - If this is a private cluster setup. Private clusters are - clusters that, by default have no external IP addresses on - the nodes and where nodes and the master communicate over - private IP addresses. This field is deprecated, use - private_cluster_config.enable_private_nodes instead. - master_ipv4_cidr_block (str): - The IP prefix in CIDR notation to use for the hosted master - network. This prefix will be used for assigning private IP - addresses to the master or set of masters, as well as the - ILB VIP. This field is deprecated, use - private_cluster_config.master_ipv4_cidr_block instead. - default_max_pods_constraint (google.container_v1beta1.types.MaxPodsConstraint): - The default constraint on the maximum number - of pods that can be run simultaneously on a node - in the node pool of this cluster. Only honored - if cluster created with IP Alias support. - resource_usage_export_config (google.container_v1beta1.types.ResourceUsageExportConfig): - Configuration for exporting resource usages. 
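Both `private_cluster` and `master_ipv4_cidr_block` carry deprecation notes pointing at `private_cluster_config`. A sketch of the replacement shape, assuming `PrivateClusterConfig` exposes the `enable_private_nodes` and `master_ipv4_cidr_block` fields those notes reference (the message itself is outside this hunk):

```python
from google.container_v1beta1 import types

cluster = types.Cluster(
    name="private-cluster",
    private_cluster_config=types.PrivateClusterConfig(
        enable_private_nodes=True,               # replaces Cluster.private_cluster
        master_ipv4_cidr_block="172.16.0.0/28",  # replaces Cluster.master_ipv4_cidr_block
    ),
)
```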
- Resource usage export is disabled when this - config is unspecified. - authenticator_groups_config (google.container_v1beta1.types.AuthenticatorGroupsConfig): - Configuration controlling RBAC group - membership information. - private_cluster_config (google.container_v1beta1.types.PrivateClusterConfig): - Configuration for private cluster. - vertical_pod_autoscaling (google.container_v1beta1.types.VerticalPodAutoscaling): - Cluster-level Vertical Pod Autoscaling - configuration. - shielded_nodes (google.container_v1beta1.types.ShieldedNodes): - Shielded Nodes configuration. - release_channel (google.container_v1beta1.types.ReleaseChannel): - Release channel configuration. - workload_identity_config (google.container_v1beta1.types.WorkloadIdentityConfig): - Configuration for the use of Kubernetes - Service Accounts in GCP IAM policies. - cluster_telemetry (google.container_v1beta1.types.ClusterTelemetry): - Telemetry integration for the cluster. - tpu_config (google.container_v1beta1.types.TpuConfig): - Configuration for Cloud TPU support. - notification_config (google.container_v1beta1.types.NotificationConfig): - Notification configuration of the cluster. - confidential_nodes (google.container_v1beta1.types.ConfidentialNodes): - Configuration of Confidential Nodes. - self_link (str): - [Output only] Server-defined URL for the resource. - zone (str): - [Output only] The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field is deprecated, use - location instead. - endpoint (str): - [Output only] The IP address of this cluster's master - endpoint. The endpoint can be accessed from the internet at - ``https://username:password@endpoint/``. - - See the ``masterAuth`` property of this resource for - username and password information. - initial_cluster_version (str): - The initial Kubernetes version for this - cluster. Valid versions are those found in - validMasterVersions returned by getServerConfig. - The version can be upgraded over time; such - upgrades are reflected in currentMasterVersion - and currentNodeVersion. - - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "","-": picks the default - Kubernetes version - current_master_version (str): - [Output only] The current software version of the master - endpoint. - current_node_version (str): - [Output only] Deprecated, use - `NodePool.version `__ - instead. The current version of the node software - components. If they are currently at multiple versions - because they're in the process of being upgraded, this - reflects the minimum version of all nodes. - create_time (str): - [Output only] The time the cluster was created, in - `RFC3339 `__ text - format. - status (google.container_v1beta1.types.Cluster.Status): - [Output only] The current status of this cluster. - status_message (str): - [Output only] Deprecated. Use conditions instead. Additional - information about the current status of this cluster, if - available. - node_ipv4_cidr_size (int): - [Output only] The size of the address space on each node for - hosting containers. This is provisioned from within the - ``container_ipv4_cidr`` range.
This field will only be set - when cluster is in route-based network mode. - services_ipv4_cidr (str): - [Output only] The IP address range of the Kubernetes - services in this cluster, in - `CIDR `__ - notation (e.g. ``1.2.3.4/29``). Service addresses are - typically put in the last ``/16`` from the container CIDR. - instance_group_urls (Sequence[str]): - Deprecated. Use node_pools.instance_group_urls. - current_node_count (int): - [Output only] The number of nodes currently in the cluster. - Deprecated. Call Kubernetes API directly to retrieve node - information. - expire_time (str): - [Output only] The time the cluster will be automatically - deleted in - `RFC3339 `__ text - format. - location (str): - [Output only] The name of the Google Compute Engine - `zone `__ - or - `region `__ - in which the cluster resides. - enable_tpu (bool): - Enable the ability to use Cloud TPUs in this cluster. This - field is deprecated, use tpu_config.enabled instead. - tpu_ipv4_cidr_block (str): - [Output only] The IP address range of the Cloud TPUs in this - cluster, in - `CIDR `__ - notation (e.g. ``1.2.3.4/29``). - database_encryption (google.container_v1beta1.types.DatabaseEncryption): - Configuration of etcd encryption. - conditions (Sequence[google.container_v1beta1.types.StatusCondition]): - Which conditions caused the current cluster - state. - master (google.container_v1beta1.types.Master): - Configuration for master components. - """ - class Status(proto.Enum): - r"""The current status of the cluster.""" - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RECONCILING = 3 - STOPPING = 4 - ERROR = 5 - DEGRADED = 6 - - name = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - initial_node_count = proto.Field( - proto.INT32, - number=3, - ) - node_config = proto.Field( - proto.MESSAGE, - number=4, - message='NodeConfig', - ) - master_auth = proto.Field( - proto.MESSAGE, - number=5, - message='MasterAuth', - ) - logging_service = proto.Field( - proto.STRING, - number=6, - ) - monitoring_service = proto.Field( - proto.STRING, - number=7, - ) - network = proto.Field( - proto.STRING, - number=8, - ) - cluster_ipv4_cidr = proto.Field( - proto.STRING, - number=9, - ) - addons_config = proto.Field( - proto.MESSAGE, - number=10, - message='AddonsConfig', - ) - subnetwork = proto.Field( - proto.STRING, - number=11, - ) - node_pools = proto.RepeatedField( - proto.MESSAGE, - number=12, - message='NodePool', - ) - locations = proto.RepeatedField( - proto.STRING, - number=13, - ) - enable_kubernetes_alpha = proto.Field( - proto.BOOL, - number=14, - ) - resource_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=15, - ) - label_fingerprint = proto.Field( - proto.STRING, - number=16, - ) - legacy_abac = proto.Field( - proto.MESSAGE, - number=18, - message='LegacyAbac', - ) - network_policy = proto.Field( - proto.MESSAGE, - number=19, - message='NetworkPolicy', - ) - ip_allocation_policy = proto.Field( - proto.MESSAGE, - number=20, - message='IPAllocationPolicy', - ) - master_authorized_networks_config = proto.Field( - proto.MESSAGE, - number=22, - message='MasterAuthorizedNetworksConfig', - ) - maintenance_policy = proto.Field( - proto.MESSAGE, - number=23, - message='MaintenancePolicy', - ) - binary_authorization = proto.Field( - proto.MESSAGE, - number=24, - message='BinaryAuthorization', - ) - pod_security_policy_config = proto.Field( - proto.MESSAGE, - number=25, - message='PodSecurityPolicyConfig', - ) - autoscaling = 
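The `Status` enum defined above is what the output-only `status` field carries, so callers can gate on it after fetching a cluster. A minimal sketch; the resource path is a placeholder and `GetClusterRequest` is the request message defined later in this file:

```python
from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
cluster = client.get_cluster(
    request=types.GetClusterRequest(
        name="projects/my-project/locations/us-central1/clusters/example-cluster",
    )
)
if cluster.status == types.Cluster.Status.RUNNING:
    print("endpoint:", cluster.endpoint)  # output only, set by the server
```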
proto.Field( - proto.MESSAGE, - number=26, - message='ClusterAutoscaling', - ) - network_config = proto.Field( - proto.MESSAGE, - number=27, - message='NetworkConfig', - ) - private_cluster = proto.Field( - proto.BOOL, - number=28, - ) - master_ipv4_cidr_block = proto.Field( - proto.STRING, - number=29, - ) - default_max_pods_constraint = proto.Field( - proto.MESSAGE, - number=30, - message='MaxPodsConstraint', - ) - resource_usage_export_config = proto.Field( - proto.MESSAGE, - number=33, - message='ResourceUsageExportConfig', - ) - authenticator_groups_config = proto.Field( - proto.MESSAGE, - number=34, - message='AuthenticatorGroupsConfig', - ) - private_cluster_config = proto.Field( - proto.MESSAGE, - number=37, - message='PrivateClusterConfig', - ) - vertical_pod_autoscaling = proto.Field( - proto.MESSAGE, - number=39, - message='VerticalPodAutoscaling', - ) - shielded_nodes = proto.Field( - proto.MESSAGE, - number=40, - message='ShieldedNodes', - ) - release_channel = proto.Field( - proto.MESSAGE, - number=41, - message='ReleaseChannel', - ) - workload_identity_config = proto.Field( - proto.MESSAGE, - number=43, - message='WorkloadIdentityConfig', - ) - cluster_telemetry = proto.Field( - proto.MESSAGE, - number=46, - message='ClusterTelemetry', - ) - tpu_config = proto.Field( - proto.MESSAGE, - number=47, - message='TpuConfig', - ) - notification_config = proto.Field( - proto.MESSAGE, - number=49, - message='NotificationConfig', - ) - confidential_nodes = proto.Field( - proto.MESSAGE, - number=50, - message='ConfidentialNodes', - ) - self_link = proto.Field( - proto.STRING, - number=100, - ) - zone = proto.Field( - proto.STRING, - number=101, - ) - endpoint = proto.Field( - proto.STRING, - number=102, - ) - initial_cluster_version = proto.Field( - proto.STRING, - number=103, - ) - current_master_version = proto.Field( - proto.STRING, - number=104, - ) - current_node_version = proto.Field( - proto.STRING, - number=105, - ) - create_time = proto.Field( - proto.STRING, - number=106, - ) - status = proto.Field( - proto.ENUM, - number=107, - enum=Status, - ) - status_message = proto.Field( - proto.STRING, - number=108, - ) - node_ipv4_cidr_size = proto.Field( - proto.INT32, - number=109, - ) - services_ipv4_cidr = proto.Field( - proto.STRING, - number=110, - ) - instance_group_urls = proto.RepeatedField( - proto.STRING, - number=111, - ) - current_node_count = proto.Field( - proto.INT32, - number=112, - ) - expire_time = proto.Field( - proto.STRING, - number=113, - ) - location = proto.Field( - proto.STRING, - number=114, - ) - enable_tpu = proto.Field( - proto.BOOL, - number=115, - ) - tpu_ipv4_cidr_block = proto.Field( - proto.STRING, - number=116, - ) - database_encryption = proto.Field( - proto.MESSAGE, - number=38, - message='DatabaseEncryption', - ) - conditions = proto.RepeatedField( - proto.MESSAGE, - number=118, - message='StatusCondition', - ) - master = proto.Field( - proto.MESSAGE, - number=124, - message='Master', - ) - - -class ClusterUpdate(proto.Message): - r"""ClusterUpdate describes an update to the cluster. Exactly one - update can be applied to a cluster with each request, so at most - one field can be provided. - - Attributes: - desired_node_version (str): - The Kubernetes version to change the nodes to - (typically an upgrade). 
- - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the Kubernetes - master version - desired_monitoring_service (str): - The monitoring service the cluster should use to write - metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE 1.15). - - ``none`` - No metrics will be exported from the cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``monitoring.googleapis.com`` for - earlier versions. - desired_addons_config (google.container_v1beta1.types.AddonsConfig): - Configurations for the various addons - available to run in the cluster. - desired_node_pool_id (str): - The node pool to be upgraded. This field is mandatory if - "desired_node_version", "desired_image_family", - "desired_node_pool_autoscaling", or - "desired_workload_metadata_config" is specified and there is - more than one node pool on the cluster. - desired_image_type (str): - The desired image type for the node pool. NOTE: Set the - "desired_node_pool" field as well. - desired_node_pool_autoscaling (google.container_v1beta1.types.NodePoolAutoscaling): - Autoscaler configuration for the node pool specified in - desired_node_pool_id. If there is only one pool in the - cluster and desired_node_pool_id is not provided then the - change applies to that single node pool. - desired_locations (Sequence[str]): - The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. - - This list must always include the cluster's primary zone. - - Warning: changing cluster locations will update the - locations of all node pools and will result in nodes being - added and/or removed. - desired_master_authorized_networks_config (google.container_v1beta1.types.MasterAuthorizedNetworksConfig): - The desired configuration options for master - authorized networks feature. - desired_pod_security_policy_config (google.container_v1beta1.types.PodSecurityPolicyConfig): - The desired configuration options for the - PodSecurityPolicy feature. - desired_cluster_autoscaling (google.container_v1beta1.types.ClusterAutoscaling): - Cluster-level autoscaling configuration. - desired_binary_authorization (google.container_v1beta1.types.BinaryAuthorization): - The desired configuration options for the - Binary Authorization feature. - desired_logging_service (str): - The logging service the cluster should use to write logs. - Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud Logging - service with a Kubernetes-native resource model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. - - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be used - for GKE 1.14+ or ``logging.googleapis.com`` for earlier - versions. 
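Because exactly one update may be applied per request, a `ClusterUpdate` is built with a single `desired_*` field and sent via `UpdateClusterRequest`. A sketch with a placeholder cluster path:

```python
from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
# At most one desired_* field per request, per the ClusterUpdate contract.
op = client.update_cluster(
    request=types.UpdateClusterRequest(
        name="projects/my-project/locations/us-central1/clusters/example-cluster",
        update=types.ClusterUpdate(
            desired_logging_service="logging.googleapis.com/kubernetes",
        ),
    )
)
print(op.name)  # returns an Operation to poll, not the updated Cluster
```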
- desired_resource_usage_export_config (google.container_v1beta1.types.ResourceUsageExportConfig): - The desired configuration for exporting - resource usage. - desired_vertical_pod_autoscaling (google.container_v1beta1.types.VerticalPodAutoscaling): - Cluster-level Vertical Pod Autoscaling - configuration. - desired_private_cluster_config (google.container_v1beta1.types.PrivateClusterConfig): - The desired private cluster configuration. - desired_intra_node_visibility_config (google.container_v1beta1.types.IntraNodeVisibilityConfig): - The desired config of Intra-node visibility. - desired_default_snat_status (google.container_v1beta1.types.DefaultSnatStatus): - The desired status of whether to disable - default sNAT for this cluster. - desired_cluster_telemetry (google.container_v1beta1.types.ClusterTelemetry): - The desired telemetry integration for the - cluster. - desired_release_channel (google.container_v1beta1.types.ReleaseChannel): - The desired release channel configuration. - desired_tpu_config (google.container_v1beta1.types.TpuConfig): - The desired Cloud TPU configuration. - desired_datapath_provider (google.container_v1beta1.types.DatapathProvider): - The desired datapath provider for the - cluster. - desired_notification_config (google.container_v1beta1.types.NotificationConfig): - The desired notification configuration. - desired_master_version (str): - The Kubernetes version to change the master - to. The only valid value is the latest supported - version. - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the default - Kubernetes version - desired_database_encryption (google.container_v1beta1.types.DatabaseEncryption): - Configuration of etcd encryption. - desired_workload_identity_config (google.container_v1beta1.types.WorkloadIdentityConfig): - Configuration for Workload Identity. - desired_shielded_nodes (google.container_v1beta1.types.ShieldedNodes): - Configuration for Shielded Nodes. - desired_master (google.container_v1beta1.types.Master): - Configuration for master components. - desired_authenticator_groups_config (google.container_v1beta1.types.AuthenticatorGroupsConfig): - AuthenticatorGroupsConfig specifies the - config for the cluster security groups settings. 
- """ - - desired_node_version = proto.Field( - proto.STRING, - number=4, - ) - desired_monitoring_service = proto.Field( - proto.STRING, - number=5, - ) - desired_addons_config = proto.Field( - proto.MESSAGE, - number=6, - message='AddonsConfig', - ) - desired_node_pool_id = proto.Field( - proto.STRING, - number=7, - ) - desired_image_type = proto.Field( - proto.STRING, - number=8, - ) - desired_node_pool_autoscaling = proto.Field( - proto.MESSAGE, - number=9, - message='NodePoolAutoscaling', - ) - desired_locations = proto.RepeatedField( - proto.STRING, - number=10, - ) - desired_master_authorized_networks_config = proto.Field( - proto.MESSAGE, - number=12, - message='MasterAuthorizedNetworksConfig', - ) - desired_pod_security_policy_config = proto.Field( - proto.MESSAGE, - number=14, - message='PodSecurityPolicyConfig', - ) - desired_cluster_autoscaling = proto.Field( - proto.MESSAGE, - number=15, - message='ClusterAutoscaling', - ) - desired_binary_authorization = proto.Field( - proto.MESSAGE, - number=16, - message='BinaryAuthorization', - ) - desired_logging_service = proto.Field( - proto.STRING, - number=19, - ) - desired_resource_usage_export_config = proto.Field( - proto.MESSAGE, - number=21, - message='ResourceUsageExportConfig', - ) - desired_vertical_pod_autoscaling = proto.Field( - proto.MESSAGE, - number=22, - message='VerticalPodAutoscaling', - ) - desired_private_cluster_config = proto.Field( - proto.MESSAGE, - number=25, - message='PrivateClusterConfig', - ) - desired_intra_node_visibility_config = proto.Field( - proto.MESSAGE, - number=26, - message='IntraNodeVisibilityConfig', - ) - desired_default_snat_status = proto.Field( - proto.MESSAGE, - number=28, - message='DefaultSnatStatus', - ) - desired_cluster_telemetry = proto.Field( - proto.MESSAGE, - number=30, - message='ClusterTelemetry', - ) - desired_release_channel = proto.Field( - proto.MESSAGE, - number=31, - message='ReleaseChannel', - ) - desired_tpu_config = proto.Field( - proto.MESSAGE, - number=38, - message='TpuConfig', - ) - desired_datapath_provider = proto.Field( - proto.ENUM, - number=50, - enum='DatapathProvider', - ) - desired_notification_config = proto.Field( - proto.MESSAGE, - number=55, - message='NotificationConfig', - ) - desired_master_version = proto.Field( - proto.STRING, - number=100, - ) - desired_database_encryption = proto.Field( - proto.MESSAGE, - number=46, - message='DatabaseEncryption', - ) - desired_workload_identity_config = proto.Field( - proto.MESSAGE, - number=47, - message='WorkloadIdentityConfig', - ) - desired_shielded_nodes = proto.Field( - proto.MESSAGE, - number=48, - message='ShieldedNodes', - ) - desired_master = proto.Field( - proto.MESSAGE, - number=52, - message='Master', - ) - desired_authenticator_groups_config = proto.Field( - proto.MESSAGE, - number=63, - message='AuthenticatorGroupsConfig', - ) - - -class Operation(proto.Message): - r"""This operation resource represents operations that may have - happened or are happening on the cluster. All fields are output - only. - - Attributes: - name (str): - The server-assigned ID for the operation. - zone (str): - The name of the Google Compute Engine - `zone `__ - in which the operation is taking place. This field is - deprecated, use location instead. - operation_type (google.container_v1beta1.types.Operation.Type): - The operation type. - status (google.container_v1beta1.types.Operation.Status): - The current status of the operation. - detail (str): - Detailed operation progress, if available. 
- status_message (str): - Output only. If an error has occurred, a - textual description of the error. Deprecated. - Use field error instead. - self_link (str): - Server-defined URL for the resource. - target_link (str): - Server-defined URL for the target of the - operation. - location (str): - [Output only] The name of the Google Compute Engine - `zone `__ - or - `region `__ - in which the cluster resides. - start_time (str): - [Output only] The time the operation started, in - `RFC3339 `__ text - format. - end_time (str): - [Output only] The time the operation completed, in - `RFC3339 `__ text - format. - progress (google.container_v1beta1.types.OperationProgress): - Output only. [Output only] Progress information for an - operation. - cluster_conditions (Sequence[google.container_v1beta1.types.StatusCondition]): - Which conditions caused the current cluster - state. Deprecated. Use field error instead. - nodepool_conditions (Sequence[google.container_v1beta1.types.StatusCondition]): - Which conditions caused the current node pool - state. Deprecated. Use field error instead. - error (google.rpc.status_pb2.Status): - The error result of the operation in case of - failure. - """ - class Status(proto.Enum): - r"""Current status of the operation.""" - STATUS_UNSPECIFIED = 0 - PENDING = 1 - RUNNING = 2 - DONE = 3 - ABORTING = 4 - - class Type(proto.Enum): - r"""Operation type.""" - TYPE_UNSPECIFIED = 0 - CREATE_CLUSTER = 1 - DELETE_CLUSTER = 2 - UPGRADE_MASTER = 3 - UPGRADE_NODES = 4 - REPAIR_CLUSTER = 5 - UPDATE_CLUSTER = 6 - CREATE_NODE_POOL = 7 - DELETE_NODE_POOL = 8 - SET_NODE_POOL_MANAGEMENT = 9 - AUTO_REPAIR_NODES = 10 - AUTO_UPGRADE_NODES = 11 - SET_LABELS = 12 - SET_MASTER_AUTH = 13 - SET_NODE_POOL_SIZE = 14 - SET_NETWORK_POLICY = 15 - SET_MAINTENANCE_POLICY = 16 - - name = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - operation_type = proto.Field( - proto.ENUM, - number=3, - enum=Type, - ) - status = proto.Field( - proto.ENUM, - number=4, - enum=Status, - ) - detail = proto.Field( - proto.STRING, - number=8, - ) - status_message = proto.Field( - proto.STRING, - number=5, - ) - self_link = proto.Field( - proto.STRING, - number=6, - ) - target_link = proto.Field( - proto.STRING, - number=7, - ) - location = proto.Field( - proto.STRING, - number=9, - ) - start_time = proto.Field( - proto.STRING, - number=10, - ) - end_time = proto.Field( - proto.STRING, - number=11, - ) - progress = proto.Field( - proto.MESSAGE, - number=12, - message='OperationProgress', - ) - cluster_conditions = proto.RepeatedField( - proto.MESSAGE, - number=13, - message='StatusCondition', - ) - nodepool_conditions = proto.RepeatedField( - proto.MESSAGE, - number=14, - message='StatusCondition', - ) - error = proto.Field( - proto.MESSAGE, - number=15, - message=status_pb2.Status, - ) - - -class OperationProgress(proto.Message): - r"""Information about operation (or operation stage) progress. - Attributes: - name (str): - A non-parameterized string describing an - operation stage. Unset for single-stage - operations. - status (google.container_v1beta1.types.Operation.Status): - Status of an operation stage. - Unset for single-stage operations. 
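Mutating RPCs in this service return the `Operation` message above rather than their final result, so callers poll `get_operation` until the status reaches `DONE` and then inspect `error`. A minimal polling sketch; the operation path is a placeholder and the five-second interval is arbitrary:

```python
import time

from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
name = "projects/my-project/locations/us-central1/operations/operation-123"
while True:
    op = client.get_operation(request=types.GetOperationRequest(name=name))
    if op.status == types.Operation.Status.DONE:
        break
    time.sleep(5)
if op.error.code:  # google.rpc.Status; a zero code means success
    raise RuntimeError(op.error.message)
```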
- metrics (Sequence[google.container_v1beta1.types.OperationProgress.Metric]): - Progress metric bundle, for example: metrics: [{name: "nodes - done", int_value: 15}, {name: "nodes total", int_value: 32}] - or metrics: [{name: "progress", double_value: 0.56}, {name: - "progress scale", double_value: 1.0}] - stages (Sequence[google.container_v1beta1.types.OperationProgress]): - Substages of an operation or a stage. - """ - - class Metric(proto.Message): - r"""Progress metric is (string, int|float|string) pair. - Attributes: - name (str): - Required. Metric name, e.g., "nodes total", - "percent done". - int_value (int): - For metrics with integer value. - double_value (float): - For metrics with floating point value. - string_value (str): - For metrics with custom values (ratios, - visual progress, etc.). - """ - - name = proto.Field( - proto.STRING, - number=1, - ) - int_value = proto.Field( - proto.INT64, - number=2, - oneof='value', - ) - double_value = proto.Field( - proto.DOUBLE, - number=3, - oneof='value', - ) - string_value = proto.Field( - proto.STRING, - number=4, - oneof='value', - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - status = proto.Field( - proto.ENUM, - number=2, - enum='Operation.Status', - ) - metrics = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=Metric, - ) - stages = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='OperationProgress', - ) - - -class CreateClusterRequest(proto.Message): - r"""CreateClusterRequest creates a cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the parent field. - cluster (google.container_v1beta1.types.Cluster): - Required. A `cluster - resource `__ - parent (str): - The parent (project and location) where the cluster will be - created. Specified in the format ``projects/*/locations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster = proto.Field( - proto.MESSAGE, - number=3, - message='Cluster', - ) - parent = proto.Field( - proto.STRING, - number=5, - ) - - -class GetClusterRequest(proto.Message): - r"""GetClusterRequest gets the settings of a cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to retrieve. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster) of the cluster to - retrieve. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class UpdateClusterRequest(proto.Message): - r"""UpdateClusterRequest updates the settings of a cluster. 
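For `CreateClusterRequest` the non-deprecated shape is just `parent` plus the `Cluster` resource; the call returns an `Operation` that can be fed to the polling loop sketched earlier. Identifiers are placeholders:

```python
from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
op = client.create_cluster(
    request=types.CreateClusterRequest(
        parent="projects/my-project/locations/us-central1",
        cluster=types.Cluster(
            name="example-cluster",
            node_pools=[types.NodePool(name="default-pool", initial_node_count=3)],
        ),
    )
)
print("operation:", op.name)
```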
- Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - update (google.container_v1beta1.types.ClusterUpdate): - Required. A description of the update. - name (str): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - update = proto.Field( - proto.MESSAGE, - number=4, - message='ClusterUpdate', - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class UpdateNodePoolRequest(proto.Message): - r"""UpdateNodePoolRequest updates the version of a node pool. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool to upgrade. This field has been deprecated - and replaced by the name field. - node_version (str): - Required. The Kubernetes version to change - the nodes to (typically an upgrade). - - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the Kubernetes - master version - image_type (str): - Required. The desired image type for the node - pool. - locations (Sequence[str]): - The desired list of Google Compute Engine - `zones `__ - in which the node pool's nodes should be located. Changing - the locations for a node pool will result in nodes being - either created or removed from the node pool, depending on - whether locations are being added or removed. - workload_metadata_config (google.container_v1beta1.types.WorkloadMetadataConfig): - The desired workload metadata config for the - node pool. - name (str): - The name (project, location, cluster, node pool) of the node - pool to update. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - upgrade_settings (google.container_v1beta1.types.NodePool.UpgradeSettings): - Upgrade settings control disruption and speed - of the upgrade. - linux_node_config (google.container_v1beta1.types.LinuxNodeConfig): - Parameters that can be configured on Linux - nodes. - kubelet_config (google.container_v1beta1.types.NodeKubeletConfig): - Node kubelet configs.
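`UpdateNodePoolRequest` addresses one pool by its full resource name, and `node_version` accepts the same aliases listed above. A sketch; the path is a placeholder and the image type value is assumed, not taken from this hunk:

```python
from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
op = client.update_node_pool(
    request=types.UpdateNodePoolRequest(
        name=(
            "projects/my-project/locations/us-central1/"
            "clusters/example-cluster/nodePools/default-pool"
        ),
        node_version="latest",        # version alias, per the docstring above
        image_type="COS_CONTAINERD",  # assumed image type value
    )
)
```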
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - node_version = proto.Field( - proto.STRING, - number=5, - ) - image_type = proto.Field( - proto.STRING, - number=6, - ) - locations = proto.RepeatedField( - proto.STRING, - number=13, - ) - workload_metadata_config = proto.Field( - proto.MESSAGE, - number=14, - message='WorkloadMetadataConfig', - ) - name = proto.Field( - proto.STRING, - number=8, - ) - upgrade_settings = proto.Field( - proto.MESSAGE, - number=15, - message='NodePool.UpgradeSettings', - ) - linux_node_config = proto.Field( - proto.MESSAGE, - number=19, - message='LinuxNodeConfig', - ) - kubelet_config = proto.Field( - proto.MESSAGE, - number=20, - message='NodeKubeletConfig', - ) - - -class SetNodePoolAutoscalingRequest(proto.Message): - r"""SetNodePoolAutoscalingRequest sets the autoscaler settings of - a node pool. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool to upgrade. This field has been deprecated - and replaced by the name field. - autoscaling (google.container_v1beta1.types.NodePoolAutoscaling): - Required. Autoscaling configuration for the - node pool. - name (str): - The name (project, location, cluster, node pool) of the node - pool to set autoscaler settings. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - autoscaling = proto.Field( - proto.MESSAGE, - number=5, - message='NodePoolAutoscaling', - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetLoggingServiceRequest(proto.Message): - r"""SetLoggingServiceRequest sets the logging service of a - cluster. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - logging_service (str): - Required. The logging service the cluster should use to - write logs. Currently available options: - - - ``logging.googleapis.com/kubernetes`` - The Cloud Logging - service with a Kubernetes-native resource model - - ``logging.googleapis.com`` - The legacy Cloud Logging - service (no longer available as of GKE 1.15). - - ``none`` - no logs will be exported from the cluster. 
- - If left as an empty - string,\ ``logging.googleapis.com/kubernetes`` will be used - for GKE 1.14+ or ``logging.googleapis.com`` for earlier - versions. - name (str): - The name (project, location, cluster) of the cluster to set - logging. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - logging_service = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class SetMonitoringServiceRequest(proto.Message): - r"""SetMonitoringServiceRequest sets the monitoring service of a - cluster. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - monitoring_service (str): - Required. The monitoring service the cluster should use to - write metrics. Currently available options: - - - "monitoring.googleapis.com/kubernetes" - The Cloud - Monitoring service with a Kubernetes-native resource - model - - ``monitoring.googleapis.com`` - The legacy Cloud - Monitoring service (no longer available as of GKE 1.15). - - ``none`` - No metrics will be exported from the cluster. - - If left as an empty - string,\ ``monitoring.googleapis.com/kubernetes`` will be - used for GKE 1.14+ or ``monitoring.googleapis.com`` for - earlier versions. - name (str): - The name (project, location, cluster) of the cluster to set - monitoring. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - monitoring_service = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetAddonsConfigRequest(proto.Message): - r"""SetAddonsRequest sets the addons associated with the cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - addons_config (google.container_v1beta1.types.AddonsConfig): - Required. The desired configurations for the - various addons available to run in the cluster. - name (str): - The name (project, location, cluster) of the cluster to set - addons. Specified in the format - ``projects/*/locations/*/clusters/*``. 
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - addons_config = proto.Field( - proto.MESSAGE, - number=4, - message='AddonsConfig', - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetLocationsRequest(proto.Message): - r"""SetLocationsRequest sets the locations of the cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - locations (Sequence[str]): - Required. The desired list of Google Compute Engine - `zones `__ - in which the cluster's nodes should be located. Changing the - locations a cluster is in will result in nodes being either - created or removed from the cluster, depending on whether - locations are being added or removed. - - This list must always include the cluster's primary zone. - name (str): - The name (project, location, cluster) of the cluster to set - locations. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - locations = proto.RepeatedField( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class UpdateMasterRequest(proto.Message): - r"""UpdateMasterRequest updates the master of the cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - master_version (str): - Required. The Kubernetes version to change - the master to. - Users may specify either explicit versions - offered by Kubernetes Engine or version aliases, - which have the following behavior: - - "latest": picks the highest valid Kubernetes - version - "1.X": picks the highest valid - patch+gke.N patch in the 1.X version - "1.X.Y": - picks the highest valid gke.N patch in the 1.X.Y - version - "1.X.Y-gke.N": picks an explicit - Kubernetes version - "-": picks the default - Kubernetes version - name (str): - The name (project, location, cluster) of the cluster to - update. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - master_version = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class SetMasterAuthRequest(proto.Message): - r"""SetMasterAuthRequest updates the admin password of a cluster. 
- Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to upgrade. This field has been deprecated and - replaced by the name field. - action (google.container_v1beta1.types.SetMasterAuthRequest.Action): - Required. The exact form of action to be - taken on the master auth. - update (google.container_v1beta1.types.MasterAuth): - Required. A description of the update. - name (str): - The name (project, location, cluster) of the cluster to set - auth. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - class Action(proto.Enum): - r"""Operation type: what type of update to perform.""" - UNKNOWN = 0 - SET_PASSWORD = 1 - GENERATE_PASSWORD = 2 - SET_USERNAME = 3 - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - action = proto.Field( - proto.ENUM, - number=4, - enum=Action, - ) - update = proto.Field( - proto.MESSAGE, - number=5, - message='MasterAuth', - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class DeleteClusterRequest(proto.Message): - r"""DeleteClusterRequest deletes a cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to delete. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster) of the cluster to - delete. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=4, - ) - - -class ListClustersRequest(proto.Message): - r"""ListClustersRequest lists clusters. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides, or "-" for all zones. This - field has been deprecated and replaced by the parent field. - parent (str): - The parent (project and location) where the clusters will be - listed. Specified in the format ``projects/*/locations/*``. - Location "-" matches all zones and all regions. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - parent = proto.Field( - proto.STRING, - number=4, - ) - - -class ListClustersResponse(proto.Message): - r"""ListClustersResponse is the result of ListClustersRequest.
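`ListClustersRequest.parent` accepts "-" as the location to match every zone and region, and the `missing_zones` field of the response just defined flags zones that could not be reached. A sketch with a placeholder project:

```python
from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
resp = client.list_clusters(
    request=types.ListClustersRequest(parent="projects/my-project/locations/-")
)
for cluster in resp.clusters:
    print(cluster.name, cluster.location, cluster.status)
if resp.missing_zones:
    print("listing may be incomplete for:", list(resp.missing_zones))
```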
- Attributes: - clusters (Sequence[google.container_v1beta1.types.Cluster]): - A list of clusters in the project in the - specified zone, or across all ones. - missing_zones (Sequence[str]): - If any zones are listed here, the list of - clusters returned may be missing those zones. - """ - - clusters = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Cluster', - ) - missing_zones = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class GetOperationRequest(proto.Message): - r"""GetOperationRequest gets a single operation. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - operation_id (str): - Required. Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced by - the name field. - name (str): - The name (project, location, operation id) of the operation - to get. Specified in the format - ``projects/*/locations/*/operations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - operation_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class ListOperationsRequest(proto.Message): - r"""ListOperationsRequest lists operations. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - to return operations for, or ``-`` for all zones. This field - has been deprecated and replaced by the parent field. - parent (str): - The parent (project and location) where the operations will - be listed. Specified in the format - ``projects/*/locations/*``. Location "-" matches all zones - and all regions. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - parent = proto.Field( - proto.STRING, - number=4, - ) - - -class CancelOperationRequest(proto.Message): - r"""CancelOperationRequest cancels a single operation. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the operation resides. This field has been - deprecated and replaced by the name field. - operation_id (str): - Required. Deprecated. The server-assigned ``name`` of the - operation. This field has been deprecated and replaced by - the name field. - name (str): - The name (project, location, operation id) of the operation - to cancel. Specified in the format - ``projects/*/locations/*/operations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - operation_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=4, - ) - - -class ListOperationsResponse(proto.Message): - r"""ListOperationsResponse is the result of - ListOperationsRequest. 
- - Attributes: - operations (Sequence[google.container_v1beta1.types.Operation]): - A list of operations in the project in the - specified zone. - missing_zones (Sequence[str]): - If any zones are listed here, the list of - operations returned may be missing the - operations from those zones. - """ - - operations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Operation', - ) - missing_zones = proto.RepeatedField( - proto.STRING, - number=2, - ) - - -class GetServerConfigRequest(proto.Message): - r"""Gets the current Kubernetes Engine service configuration. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - to return operations for. This field has been deprecated and - replaced by the name field. - name (str): - The name (project and location) of the server config to get, - specified in the format ``projects/*/locations/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - name = proto.Field( - proto.STRING, - number=4, - ) - - -class ServerConfig(proto.Message): - r"""Kubernetes Engine service configuration. - Attributes: - default_cluster_version (str): - Version of Kubernetes the service deploys by - default. - valid_node_versions (Sequence[str]): - List of valid node upgrade target versions, - in descending order. - default_image_type (str): - Default image type. - valid_image_types (Sequence[str]): - List of valid image types. - valid_master_versions (Sequence[str]): - List of valid master versions, in descending - order. - channels (Sequence[google.container_v1beta1.types.ServerConfig.ReleaseChannelConfig]): - List of release channel configurations. - """ - - class ReleaseChannelConfig(proto.Message): - r"""ReleaseChannelConfig exposes configuration for a release - channel. - - Attributes: - channel (google.container_v1beta1.types.ReleaseChannel.Channel): - The release channel this configuration - applies to. - default_version (str): - The default version for newly created - clusters on the channel. - available_versions (Sequence[google.container_v1beta1.types.ServerConfig.ReleaseChannelConfig.AvailableVersion]): - Deprecated. This field has been deprecated and replaced with - the valid_versions field. - valid_versions (Sequence[str]): - List of valid versions for the channel. - """ - - class AvailableVersion(proto.Message): - r"""Deprecated. - Attributes: - version (str): - Kubernetes version. - reason (str): - Reason for availability. 
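`ServerConfig` is how callers discover valid versions before creating or upgrading anything. A sketch that reads the fields described above, with a placeholder project and location:

```python
from google.container_v1beta1 import ClusterManagerClient, types

client = ClusterManagerClient()
config = client.get_server_config(
    request=types.GetServerConfigRequest(
        name="projects/my-project/locations/us-central1",
    )
)
print("default version:", config.default_cluster_version)
print("valid masters:", list(config.valid_master_versions))
for channel_config in config.channels:
    print(channel_config.channel, "->", channel_config.default_version)
```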
- """ - - version = proto.Field( - proto.STRING, - number=1, - ) - reason = proto.Field( - proto.STRING, - number=2, - ) - - channel = proto.Field( - proto.ENUM, - number=1, - enum='ReleaseChannel.Channel', - ) - default_version = proto.Field( - proto.STRING, - number=2, - ) - available_versions = proto.RepeatedField( - proto.MESSAGE, - number=3, - message='ServerConfig.ReleaseChannelConfig.AvailableVersion', - ) - valid_versions = proto.RepeatedField( - proto.STRING, - number=4, - ) - - default_cluster_version = proto.Field( - proto.STRING, - number=1, - ) - valid_node_versions = proto.RepeatedField( - proto.STRING, - number=3, - ) - default_image_type = proto.Field( - proto.STRING, - number=4, - ) - valid_image_types = proto.RepeatedField( - proto.STRING, - number=5, - ) - valid_master_versions = proto.RepeatedField( - proto.STRING, - number=6, - ) - channels = proto.RepeatedField( - proto.MESSAGE, - number=9, - message=ReleaseChannelConfig, - ) - - -class CreateNodePoolRequest(proto.Message): - r"""CreateNodePoolRequest creates a node pool for a cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the parent field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the parent field. - node_pool (google.container_v1beta1.types.NodePool): - Required. The node pool to create. - parent (str): - The parent (project, location, cluster id) where the node - pool will be created. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool = proto.Field( - proto.MESSAGE, - number=4, - message='NodePool', - ) - parent = proto.Field( - proto.STRING, - number=6, - ) - - -class DeleteNodePoolRequest(proto.Message): - r"""DeleteNodePoolRequest deletes a node pool for a cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool to delete. This field has been deprecated - and replaced by the name field. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to delete. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class ListNodePoolsRequest(proto.Message): - r"""ListNodePoolsRequest lists the node pool(s) for a cluster. 
- Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the parent - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the parent field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the parent field. - parent (str): - The parent (project, location, cluster id) where the node - pools will be listed. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - parent = proto.Field( - proto.STRING, - number=5, - ) - - -class GetNodePoolRequest(proto.Message): - r"""GetNodePoolRequest retrieves a node pool for a cluster. - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to get. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class NodePool(proto.Message): - r"""NodePool contains the name and configuration for a cluster's - node pool. Node pools are a set of nodes (i.e. VM's), with a - common configuration and specification, under the control of the - cluster master. They may have a set of Kubernetes labels applied - to them, which may be used to reference them during pod - scheduling. They may also be resized up or down, to accommodate - the workload. - - Attributes: - name (str): - The name of the node pool. - config (google.container_v1beta1.types.NodeConfig): - The node configuration of the pool. - initial_node_count (int): - The initial node count for the pool. You must ensure that - your Compute Engine `resource - quota `__ is - sufficient for this number of instances. You must also have - available firewall and routes quota. - locations (Sequence[str]): - The list of Google Compute Engine - `zones `__ - in which the NodePool's nodes should be located. - - If this value is unspecified during node pool creation, the - `Cluster.Locations `__ - value will be used, instead. - - Warning: changing node pool locations will result in nodes - being added and/or removed. - self_link (str): - [Output only] Server-defined URL for the resource. - version (str): - The version of the Kubernetes of this node. 
- instance_group_urls (Sequence[str]): - [Output only] The resource URLs of the `managed instance - groups `__ - associated with this node pool. - status (google.container_v1beta1.types.NodePool.Status): - [Output only] The status of the nodes in this pool instance. - status_message (str): - [Output only] Deprecated. Use conditions instead. Additional - information about the current status of this node pool - instance, if available. - autoscaling (google.container_v1beta1.types.NodePoolAutoscaling): - Autoscaler configuration for this NodePool. - Autoscaler is enabled only if a valid - configuration is present. - management (google.container_v1beta1.types.NodeManagement): - NodeManagement configuration for this - NodePool. - max_pods_constraint (google.container_v1beta1.types.MaxPodsConstraint): - The constraint on the maximum number of pods - that can be run simultaneously on a node in the - node pool. - conditions (Sequence[google.container_v1beta1.types.StatusCondition]): - Which conditions caused the current node pool - state. - pod_ipv4_cidr_size (int): - [Output only] The pod CIDR block size per node in this node - pool. - upgrade_settings (google.container_v1beta1.types.NodePool.UpgradeSettings): - Upgrade settings control disruption and speed - of the upgrade. - """ - class Status(proto.Enum): - r"""The current status of the node pool instance.""" - STATUS_UNSPECIFIED = 0 - PROVISIONING = 1 - RUNNING = 2 - RUNNING_WITH_ERROR = 3 - RECONCILING = 4 - STOPPING = 5 - ERROR = 6 - - class UpgradeSettings(proto.Message): - r"""These upgrade settings control the level of parallelism and - the level of disruption caused by an upgrade. - - maxUnavailable controls the number of nodes that can be - simultaneously unavailable. - - maxSurge controls the number of additional nodes that can be - added to the node pool temporarily for the time of the upgrade - to increase the number of available nodes. - - (maxUnavailable + maxSurge) determines the level of parallelism - (how many nodes are being upgraded at the same time). - - Note: upgrades inevitably introduce some disruption since - workloads need to be moved from old nodes to new, upgraded ones. - Even if maxUnavailable=0, this holds true. (Disruption stays - within the limits of PodDisruptionBudget, if it is configured.) - - Consider a hypothetical node pool with 5 nodes having - maxSurge=2, maxUnavailable=1. This means the upgrade process - upgrades 3 nodes simultaneously. It creates 2 additional - (upgraded) nodes, then it brings down 3 old (not yet upgraded) - nodes at the same time. This ensures that there are always at - least 4 nodes available. - - Attributes: - max_surge (int): - The maximum number of nodes that can be - created beyond the current size of the node pool - during the upgrade process. - max_unavailable (int): - The maximum number of nodes that can be - simultaneously unavailable during the upgrade - process. A node is considered available if its - status is Ready. 
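The arithmetic in this docstring can be checked with a small, hypothetical helper (not part of the library):

    def upgrade_bounds(node_count: int, max_surge: int, max_unavailable: int):
        # Nodes upgraded in parallel, and nodes guaranteed available throughout.
        return (max_surge + max_unavailable, node_count - max_unavailable)

    # The docstring's example: 5 nodes, maxSurge=2, maxUnavailable=1 ->
    # 3 nodes upgraded at once, at least 4 nodes always available.
    assert upgrade_bounds(5, 2, 1) == (3, 4)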
- """ - - max_surge = proto.Field( - proto.INT32, - number=1, - ) - max_unavailable = proto.Field( - proto.INT32, - number=2, - ) - - name = proto.Field( - proto.STRING, - number=1, - ) - config = proto.Field( - proto.MESSAGE, - number=2, - message='NodeConfig', - ) - initial_node_count = proto.Field( - proto.INT32, - number=3, - ) - locations = proto.RepeatedField( - proto.STRING, - number=13, - ) - self_link = proto.Field( - proto.STRING, - number=100, - ) - version = proto.Field( - proto.STRING, - number=101, - ) - instance_group_urls = proto.RepeatedField( - proto.STRING, - number=102, - ) - status = proto.Field( - proto.ENUM, - number=103, - enum=Status, - ) - status_message = proto.Field( - proto.STRING, - number=104, - ) - autoscaling = proto.Field( - proto.MESSAGE, - number=4, - message='NodePoolAutoscaling', - ) - management = proto.Field( - proto.MESSAGE, - number=5, - message='NodeManagement', - ) - max_pods_constraint = proto.Field( - proto.MESSAGE, - number=6, - message='MaxPodsConstraint', - ) - conditions = proto.RepeatedField( - proto.MESSAGE, - number=105, - message='StatusCondition', - ) - pod_ipv4_cidr_size = proto.Field( - proto.INT32, - number=7, - ) - upgrade_settings = proto.Field( - proto.MESSAGE, - number=107, - message=UpgradeSettings, - ) - - -class NodeManagement(proto.Message): - r"""NodeManagement defines the set of node management services - turned on for the node pool. - - Attributes: - auto_upgrade (bool): - Whether the nodes will be automatically - upgraded. - auto_repair (bool): - Whether the nodes will be automatically - repaired. - upgrade_options (google.container_v1beta1.types.AutoUpgradeOptions): - Specifies the Auto Upgrade knobs for the node - pool. - """ - - auto_upgrade = proto.Field( - proto.BOOL, - number=1, - ) - auto_repair = proto.Field( - proto.BOOL, - number=2, - ) - upgrade_options = proto.Field( - proto.MESSAGE, - number=10, - message='AutoUpgradeOptions', - ) - - -class AutoUpgradeOptions(proto.Message): - r"""AutoUpgradeOptions defines the set of options for the user to - control how the Auto Upgrades will proceed. - - Attributes: - auto_upgrade_start_time (str): - [Output only] This field is set when upgrades are about to - commence with the approximate start time for the upgrades, - in `RFC3339 `__ text - format. - description (str): - [Output only] This field is set when upgrades are about to - commence with the description of the upgrade. - """ - - auto_upgrade_start_time = proto.Field( - proto.STRING, - number=1, - ) - description = proto.Field( - proto.STRING, - number=2, - ) - - -class MaintenancePolicy(proto.Message): - r"""MaintenancePolicy defines the maintenance policy to be used - for the cluster. - - Attributes: - window (google.container_v1beta1.types.MaintenanceWindow): - Specifies the maintenance window in which - maintenance may be performed. - resource_version (str): - A hash identifying the version of this policy, so that - updates to fields of the policy won't accidentally undo - intermediate changes (and so that users of the API unaware - of some fields won't accidentally remove other fields). Make - a ``get()`` request to the cluster to get the current - resource version and include it with requests to set the - policy. - """ - - window = proto.Field( - proto.MESSAGE, - number=1, - message='MaintenanceWindow', - ) - resource_version = proto.Field( - proto.STRING, - number=3, - ) - - -class MaintenanceWindow(proto.Message): - r"""MaintenanceWindow defines the maintenance window to be used - for the cluster. 
- - Attributes: - daily_maintenance_window (google.container_v1beta1.types.DailyMaintenanceWindow): - DailyMaintenanceWindow specifies a daily - maintenance operation window. - recurring_window (google.container_v1beta1.types.RecurringTimeWindow): - RecurringWindow specifies some number of - recurring time periods for maintenance to occur. - The time windows may be overlapping. If no - maintenance windows are set, maintenance can - occur at any time. - maintenance_exclusions (Sequence[google.container_v1beta1.types.MaintenanceWindow.MaintenanceExclusionsEntry]): - Exceptions to maintenance window. Non-emergency - maintenance should not occur in these - windows. - """ - - daily_maintenance_window = proto.Field( - proto.MESSAGE, - number=2, - oneof='policy', - message='DailyMaintenanceWindow', - ) - recurring_window = proto.Field( - proto.MESSAGE, - number=3, - oneof='policy', - message='RecurringTimeWindow', - ) - maintenance_exclusions = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=4, - message='TimeWindow', - ) - - -class TimeWindow(proto.Message): - r"""Represents an arbitrary window of time. - Attributes: - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time that the window first starts. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The time that the window ends. The end time - should take place after the start time. - """ - - start_time = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - end_time = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - - -class RecurringTimeWindow(proto.Message): - r"""Represents an arbitrary window of time that recurs. - Attributes: - window (google.container_v1beta1.types.TimeWindow): - The window of the first recurrence. - recurrence (str): - An RRULE - (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for - how this window recurs. They go on for the span of time - between the start and end time. - - For example, to have something repeat every weekday, you'd - use: ``FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR`` - - To repeat some window daily (equivalent to the - DailyMaintenanceWindow): ``FREQ=DAILY`` - - For the first weekend of every month: - ``FREQ=MONTHLY;BYSETPOS=1;BYDAY=SA,SU`` - - This specifies how frequently the window starts. E.g., if you - wanted to have a 9-5 UTC-4 window every weekday, you'd use - something like: - - :: - - start time = 2019-01-01T09:00:00-0400 - end time = 2019-01-01T17:00:00-0400 - recurrence = FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR - - Windows can span multiple days. E.g., to make the window - encompass every weekend from midnight Saturday till the last - minute of Sunday UTC: - - :: - - start time = 2019-01-05T00:00:00Z - end time = 2019-01-07T23:59:00Z - recurrence = FREQ=WEEKLY;BYDAY=SA - - Note the start and end time's specific dates are largely - arbitrary except to specify duration of the window and when - it first starts. The FREQ values of HOURLY, MINUTELY, and - SECONDLY are not supported. - """ - - window = proto.Field( - proto.MESSAGE, - number=1, - message='TimeWindow', - ) - recurrence = proto.Field( - proto.STRING, - number=2, - ) - - -class DailyMaintenanceWindow(proto.Message): - r"""Time window specified for daily maintenance operations. - Attributes: - start_time (str): - Time within the maintenance window to start the maintenance - operations. It must be in format "HH:MM", where HH : [00-23] - and MM : [00-59] GMT.
- duration (str): - [Output only] Duration of the time window, automatically - chosen to be the smallest possible in the given scenario. - """ - - start_time = proto.Field( - proto.STRING, - number=2, - ) - duration = proto.Field( - proto.STRING, - number=3, - ) - - -class SetNodePoolManagementRequest(proto.Message): - r"""SetNodePoolManagementRequest sets the node management - properties of a node pool. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to update. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool to update. This field has been deprecated - and replaced by the name field. - management (google.container_v1beta1.types.NodeManagement): - Required. NodeManagement configuration for - the node pool. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to set management properties. Specified in the - format ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - management = proto.Field( - proto.MESSAGE, - number=5, - message='NodeManagement', - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class SetNodePoolSizeRequest(proto.Message): - r"""SetNodePoolSizeRequest sets the size of a node - pool. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to update. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool to update. This field has been deprecated - and replaced by the name field. - node_count (int): - Required. The desired node count for the - pool. - name (str): - The name (project, location, cluster, node pool id) of the - node pool to set size. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - node_count = proto.Field( - proto.INT32, - number=5, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class RollbackNodePoolUpgradeRequest(proto.Message): - r"""RollbackNodePoolUpgradeRequest rolls back the previously - Aborted or Failed NodePool upgrade. This is a no-op if the - last upgrade successfully completed. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__.
- This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the cluster - to rollback. This field has been deprecated and - replaced by the name field. - node_pool_id (str): - Required. Deprecated. The name of the node - pool to rollback. This field has been deprecated - and replaced by the name field. - name (str): - The name (project, location, cluster, node pool id) of the - node poll to rollback upgrade. Specified in the format - ``projects/*/locations/*/clusters/*/nodePools/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - node_pool_id = proto.Field( - proto.STRING, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class ListNodePoolsResponse(proto.Message): - r"""ListNodePoolsResponse is the result of ListNodePoolsRequest. - Attributes: - node_pools (Sequence[google.container_v1beta1.types.NodePool]): - A list of node pools for a cluster. - """ - - node_pools = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='NodePool', - ) - - -class ClusterAutoscaling(proto.Message): - r"""ClusterAutoscaling contains global, per-cluster information - required by Cluster Autoscaler to automatically adjust the size - of the cluster and create/delete - node pools based on the current needs. - - Attributes: - enable_node_autoprovisioning (bool): - Enables automatic node pool creation and - deletion. - resource_limits (Sequence[google.container_v1beta1.types.ResourceLimit]): - Contains global constraints regarding minimum - and maximum amount of resources in the cluster. - autoscaling_profile (google.container_v1beta1.types.ClusterAutoscaling.AutoscalingProfile): - Defines autoscaling behaviour. - autoprovisioning_node_pool_defaults (google.container_v1beta1.types.AutoprovisioningNodePoolDefaults): - AutoprovisioningNodePoolDefaults contains - defaults for a node pool created by NAP. - autoprovisioning_locations (Sequence[str]): - The list of Google Compute Engine - `zones `__ - in which the NodePool's nodes can be created by NAP. - """ - class AutoscalingProfile(proto.Enum): - r"""Defines possible options for autoscaling_profile field.""" - PROFILE_UNSPECIFIED = 0 - OPTIMIZE_UTILIZATION = 1 - BALANCED = 2 - - enable_node_autoprovisioning = proto.Field( - proto.BOOL, - number=1, - ) - resource_limits = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='ResourceLimit', - ) - autoscaling_profile = proto.Field( - proto.ENUM, - number=3, - enum=AutoscalingProfile, - ) - autoprovisioning_node_pool_defaults = proto.Field( - proto.MESSAGE, - number=4, - message='AutoprovisioningNodePoolDefaults', - ) - autoprovisioning_locations = proto.RepeatedField( - proto.STRING, - number=5, - ) - - -class AutoprovisioningNodePoolDefaults(proto.Message): - r"""AutoprovisioningNodePoolDefaults contains defaults for a node - pool created by NAP. - - Attributes: - oauth_scopes (Sequence[str]): - The set of Google API scopes to be made available on all of - the node VMs under the "default" service account. 
- - The following scopes are recommended, but not required, and - by default are not included: - - - ``https://www.googleapis.com/auth/compute`` is required - for mounting persistent storage on your nodes. - - ``https://www.googleapis.com/auth/devstorage.read_only`` - is required for communicating with **gcr.io** (the - `Google Container - Registry `__). - - If unspecified, no scopes are added, unless Cloud Logging or - Cloud Monitoring are enabled, in which case their required - scopes will be added. - service_account (str): - The Google Cloud Platform Service Account to - be used by the node VMs. Specify the email - address of the Service Account; otherwise, if no - Service Account is specified, the "default" - service account is used. - upgrade_settings (google.container_v1beta1.types.NodePool.UpgradeSettings): - Upgrade settings control disruption and speed - of the upgrade. - management (google.container_v1beta1.types.NodeManagement): - NodeManagement configuration for this - NodePool. - min_cpu_platform (str): - Minimum CPU platform to be used by this instance. The - instance may be scheduled on the specified or newer CPU - platform. Applicable values are the friendly names of CPU - platforms, such as ``minCpuPlatform: "Intel Haswell"`` or - ``minCpuPlatform: "Intel Sandy Bridge"``. For more - information, read `how to specify min CPU - platform `__ - To unset the min cpu platform field pass "automatic" as - field value. - disk_size_gb (int): - Size of the disk attached to each node, - specified in GB. The smallest allowed disk size - is 10GB. - If unspecified, the default disk size is 100GB. - disk_type (str): - Type of the disk attached to each node (e.g. - 'pd-standard', 'pd-ssd' or 'pd-balanced') - - If unspecified, the default disk type is 'pd- - standard' - shielded_instance_config (google.container_v1beta1.types.ShieldedInstanceConfig): - Shielded Instance options. - boot_disk_kms_key (str): - The Customer Managed Encryption Key used to encrypt the boot - disk attached to each node in the node pool. This should be - of the form - projects/[KEY_PROJECT_ID]/locations/[LOCATION]/keyRings/[RING_NAME]/cryptoKeys/[KEY_NAME]. - For more information about protecting resources with Cloud - KMS Keys please see: - https://cloud.google.com/compute/docs/disks/customer-managed-encryption - image_type (str): - The image type to use for node created by - NodeAutoprovisioning. - """ - - oauth_scopes = proto.RepeatedField( - proto.STRING, - number=1, - ) - service_account = proto.Field( - proto.STRING, - number=2, - ) - upgrade_settings = proto.Field( - proto.MESSAGE, - number=3, - message='NodePool.UpgradeSettings', - ) - management = proto.Field( - proto.MESSAGE, - number=4, - message='NodeManagement', - ) - min_cpu_platform = proto.Field( - proto.STRING, - number=5, - ) - disk_size_gb = proto.Field( - proto.INT32, - number=6, - ) - disk_type = proto.Field( - proto.STRING, - number=7, - ) - shielded_instance_config = proto.Field( - proto.MESSAGE, - number=8, - message='ShieldedInstanceConfig', - ) - boot_disk_kms_key = proto.Field( - proto.STRING, - number=9, - ) - image_type = proto.Field( - proto.STRING, - number=10, - ) - - -class ResourceLimit(proto.Message): - r"""Contains information about amount of some resource in the - cluster. For memory, value should be in GB. - - Attributes: - resource_type (str): - Resource name "cpu", "memory" or gpu-specific - string. - minimum (int): - Minimum amount of the resource in the - cluster. 
- maximum (int): - Maximum amount of the resource in the - cluster. - """ - - resource_type = proto.Field( - proto.STRING, - number=1, - ) - minimum = proto.Field( - proto.INT64, - number=2, - ) - maximum = proto.Field( - proto.INT64, - number=3, - ) - - -class NodePoolAutoscaling(proto.Message): - r"""NodePoolAutoscaling contains information required by cluster - autoscaler to adjust the size of the node pool to the current - cluster usage. - - Attributes: - enabled (bool): - Is autoscaling enabled for this node pool. - min_node_count (int): - Minimum number of nodes in the NodePool. Must be >= 1 and <= - max_node_count. - max_node_count (int): - Maximum number of nodes in the NodePool. Must be >= - min_node_count. There has to enough quota to scale up the - cluster. - autoprovisioned (bool): - Can this node pool be deleted automatically. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - min_node_count = proto.Field( - proto.INT32, - number=2, - ) - max_node_count = proto.Field( - proto.INT32, - number=3, - ) - autoprovisioned = proto.Field( - proto.BOOL, - number=4, - ) - - -class SetLabelsRequest(proto.Message): - r"""SetLabelsRequest sets the Google Cloud Platform labels on a - Google Container Engine cluster, which will in turn set them for - Google Compute Engine resources used by that cluster - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the name field. - resource_labels (Sequence[google.container_v1beta1.types.SetLabelsRequest.ResourceLabelsEntry]): - Required. The labels to set for that cluster. - label_fingerprint (str): - Required. The fingerprint of the previous set of labels for - this resource, used to detect conflicts. The fingerprint is - initially generated by Kubernetes Engine and changes after - every request to modify or update labels. You must always - provide an up-to-date fingerprint hash when updating or - changing labels. Make a ``get()`` request to the resource to - get the latest fingerprint. - name (str): - The name (project, location, cluster id) of the cluster to - set labels. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - resource_labels = proto.MapField( - proto.STRING, - proto.STRING, - number=4, - ) - label_fingerprint = proto.Field( - proto.STRING, - number=5, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class SetLegacyAbacRequest(proto.Message): - r"""SetLegacyAbacRequest enables or disables the ABAC - authorization mechanism for a cluster. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. 
The name of the cluster - to update. This field has been deprecated and - replaced by the name field. - enabled (bool): - Required. Whether ABAC authorization will be - enabled in the cluster. - name (str): - The name (project, location, cluster id) of the cluster to - set legacy abac. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - enabled = proto.Field( - proto.BOOL, - number=4, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class StartIPRotationRequest(proto.Message): - r"""StartIPRotationRequest creates a new IP for the cluster and - then performs a node upgrade on each node pool to point to the - new IP. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster id) of the cluster to - start IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - rotate_credentials (bool): - Whether to rotate credentials during IP - rotation. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=6, - ) - rotate_credentials = proto.Field( - proto.BOOL, - number=7, - ) - - -class CompleteIPRotationRequest(proto.Message): - r"""CompleteIPRotationRequest moves the cluster master back into - single-IP mode. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the name field. - name (str): - The name (project, location, cluster id) of the cluster to - complete IP rotation. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - name = proto.Field( - proto.STRING, - number=7, - ) - - -class AcceleratorConfig(proto.Message): - r"""AcceleratorConfig represents a Hardware Accelerator request. - Attributes: - accelerator_count (int): - The number of the accelerator cards exposed - to an instance. - accelerator_type (str): - The accelerator type resource name. 
List of supported - accelerators - `here `__ - """ - - accelerator_count = proto.Field( - proto.INT64, - number=1, - ) - accelerator_type = proto.Field( - proto.STRING, - number=2, - ) - - -class WorkloadMetadataConfig(proto.Message): - r"""WorkloadMetadataConfig defines the metadata configuration to - expose to workloads on the node pool. - - Attributes: - node_metadata (google.container_v1beta1.types.WorkloadMetadataConfig.NodeMetadata): - NodeMetadata is the configuration for how to - expose metadata to the workloads running on the - node. - mode (google.container_v1beta1.types.WorkloadMetadataConfig.Mode): - Mode is the configuration for how to expose - metadata to workloads running on the node pool. - """ - class NodeMetadata(proto.Enum): - r"""NodeMetadata is the configuration for if and how to expose - the node metadata to the workload running on the node. - """ - UNSPECIFIED = 0 - SECURE = 1 - EXPOSE = 2 - GKE_METADATA_SERVER = 3 - - class Mode(proto.Enum): - r"""Mode is the configuration for how to expose metadata to - workloads running on the node. - """ - MODE_UNSPECIFIED = 0 - GCE_METADATA = 1 - GKE_METADATA = 2 - - node_metadata = proto.Field( - proto.ENUM, - number=1, - enum=NodeMetadata, - ) - mode = proto.Field( - proto.ENUM, - number=2, - enum=Mode, - ) - - -class SetNetworkPolicyRequest(proto.Message): - r"""SetNetworkPolicyRequest enables/disables network policy for a - cluster. - - Attributes: - project_id (str): - Required. Deprecated. The Google Developers Console `project - ID or project - number `__. - This field has been deprecated and replaced by the name - field. - zone (str): - Required. Deprecated. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. This field has been deprecated - and replaced by the name field. - cluster_id (str): - Required. Deprecated. The name of the - cluster. This field has been deprecated and - replaced by the name field. - network_policy (google.container_v1beta1.types.NetworkPolicy): - Required. Configuration options for the - NetworkPolicy feature. - name (str): - The name (project, location, cluster id) of the cluster to - set networking policy. Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - network_policy = proto.Field( - proto.MESSAGE, - number=4, - message='NetworkPolicy', - ) - name = proto.Field( - proto.STRING, - number=6, - ) - - -class SetMaintenancePolicyRequest(proto.Message): - r"""SetMaintenancePolicyRequest sets the maintenance policy for a - cluster. - - Attributes: - project_id (str): - Required. The Google Developers Console `project ID or - project - number `__. - zone (str): - Required. The name of the Google Compute Engine - `zone `__ - in which the cluster resides. - cluster_id (str): - Required. The name of the cluster to update. - maintenance_policy (google.container_v1beta1.types.MaintenancePolicy): - Required. The maintenance policy to be set - for the cluster. An empty field clears the - existing maintenance policy. - name (str): - The name (project, location, cluster id) of the cluster to - set maintenance policy. Specified in the format - ``projects/*/locations/*/clusters/*``. 
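Applying a policy then looks like the sketch below (placeholder identifiers; ``policy`` as built in the maintenance sketch earlier; per the docstring above, an empty maintenance_policy clears the existing policy):

    operation = client.set_maintenance_policy(
        request=types.SetMaintenancePolicyRequest(
            project_id="my-project",
            zone="us-central1-a",
            cluster_id="my-cluster",
            maintenance_policy=policy,
        )
    )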
- """ - - project_id = proto.Field( - proto.STRING, - number=1, - ) - zone = proto.Field( - proto.STRING, - number=2, - ) - cluster_id = proto.Field( - proto.STRING, - number=3, - ) - maintenance_policy = proto.Field( - proto.MESSAGE, - number=4, - message='MaintenancePolicy', - ) - name = proto.Field( - proto.STRING, - number=5, - ) - - -class ListLocationsRequest(proto.Message): - r"""ListLocationsRequest is used to request the locations that - offer GKE. - - Attributes: - parent (str): - Required. Contains the name of the resource requested. - Specified in the format ``projects/*``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class ListLocationsResponse(proto.Message): - r"""ListLocationsResponse returns the list of all GKE locations - and their recommendation state. - - Attributes: - locations (Sequence[google.container_v1beta1.types.Location]): - A full list of GKE locations. - next_page_token (str): - Only return ListLocationsResponse that occur after the - page_token. This value should be populated from the - ListLocationsResponse.next_page_token if that response token - was set (which happens when listing more Locations than fit - in a single ListLocationsResponse). - """ - - @property - def raw_page(self): - return self - - locations = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Location', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class Location(proto.Message): - r"""Location returns the location name, and if the location is - recommended for GKE cluster scheduling. - - Attributes: - type_ (google.container_v1beta1.types.Location.LocationType): - Contains the type of location this Location - is for. Regional or Zonal. - name (str): - Contains the name of the resource requested. Specified in - the format ``projects/*/locations/*``. - recommended (bool): - Whether the location is recomended for GKE - cluster scheduling. - """ - class LocationType(proto.Enum): - r"""LocationType is the type of GKE location, regional or zonal.""" - LOCATION_TYPE_UNSPECIFIED = 0 - ZONE = 1 - REGION = 2 - - type_ = proto.Field( - proto.ENUM, - number=1, - enum=LocationType, - ) - name = proto.Field( - proto.STRING, - number=2, - ) - recommended = proto.Field( - proto.BOOL, - number=3, - ) - - -class StatusCondition(proto.Message): - r"""StatusCondition describes why a cluster or a node pool has a - certain status (e.g., ERROR or DEGRADED). - - Attributes: - code (google.container_v1beta1.types.StatusCondition.Code): - Machine-friendly representation of the condition Deprecated. - Use canonical_code instead. - message (str): - Human-friendly representation of the - condition - canonical_code (google.rpc.code_pb2.Code): - Canonical code of the condition. - """ - class Code(proto.Enum): - r"""Code for each condition""" - UNKNOWN = 0 - GCE_STOCKOUT = 1 - GKE_SERVICE_ACCOUNT_DELETED = 2 - GCE_QUOTA_EXCEEDED = 3 - SET_BY_OPERATOR = 4 - CLOUD_KMS_KEY_ERROR = 7 - - code = proto.Field( - proto.ENUM, - number=1, - enum=Code, - ) - message = proto.Field( - proto.STRING, - number=2, - ) - canonical_code = proto.Field( - proto.ENUM, - number=3, - enum=code_pb2.Code, - ) - - -class NetworkConfig(proto.Message): - r"""NetworkConfig reports the relative names of network & - subnetwork. - - Attributes: - network (str): - Output only. The relative name of the Google Compute Engine - [network]`google.container.v1beta1.NetworkConfig.network `__ - to which the cluster is connected. 
Example: - projects/my-project/global/networks/my-network - subnetwork (str): - Output only. The relative name of the Google Compute Engine - `subnetwork `__ - to which the cluster is connected. Example: - projects/my-project/regions/us-central1/subnetworks/my-subnet - enable_intra_node_visibility (bool): - Whether Intra-node visibility is enabled for - this cluster. This makes same node pod to pod - traffic visible for VPC network. - default_snat_status (google.container_v1beta1.types.DefaultSnatStatus): - Whether the cluster disables default in-node sNAT rules. - In-node sNAT rules will be disabled when default_snat_status - is disabled. When disabled is set to false, default IP - masquerade rules will be applied to the nodes to prevent - sNAT on cluster internal traffic. - datapath_provider (google.container_v1beta1.types.DatapathProvider): - The desired datapath provider for this - cluster. By default, uses the IPTables-based - kube-proxy implementation. - """ - - network = proto.Field( - proto.STRING, - number=1, - ) - subnetwork = proto.Field( - proto.STRING, - number=2, - ) - enable_intra_node_visibility = proto.Field( - proto.BOOL, - number=5, - ) - default_snat_status = proto.Field( - proto.MESSAGE, - number=7, - message='DefaultSnatStatus', - ) - datapath_provider = proto.Field( - proto.ENUM, - number=11, - enum='DatapathProvider', - ) - - -class ListUsableSubnetworksRequest(proto.Message): - r"""ListUsableSubnetworksRequest requests the list of usable - subnetworks. available to a user for creating clusters. - - Attributes: - parent (str): - Required. The parent project where subnetworks are usable. - Specified in the format ``projects/*``. - filter (str): - Filtering currently only supports equality on the - networkProjectId and must be in the form: - "networkProjectId=[PROJECTID]", where ``networkProjectId`` - is the project which owns the listed subnetworks. This - defaults to the parent project ID. - page_size (int): - The max number of results per page that should be returned. - If the number of available results is larger than - ``page_size``, a ``next_page_token`` is returned which can - be used to get the next page of results in subsequent - requests. Acceptable values are 0 to 500, inclusive. - (Default: 500) - page_token (str): - Specifies a page token to use. Set this to - the nextPageToken returned by previous list - requests to get the next page of results. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - filter = proto.Field( - proto.STRING, - number=2, - ) - page_size = proto.Field( - proto.INT32, - number=3, - ) - page_token = proto.Field( - proto.STRING, - number=4, - ) - - -class ListUsableSubnetworksResponse(proto.Message): - r"""ListUsableSubnetworksResponse is the response of - ListUsableSubnetworksRequest. - - Attributes: - subnetworks (Sequence[google.container_v1beta1.types.UsableSubnetwork]): - A list of usable subnetworks in the specified - network project. - next_page_token (str): - This token allows you to get the next page of results for - list requests. If the number of results is larger than - ``page_size``, use the ``next_page_token`` as a value for - the query parameter ``page_token`` in the next request. The - value will become empty when there are no more pages. 
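The generated client wraps this response in a pager, so plain iteration follows next_page_token until it is empty; a sketch with placeholder project IDs (client and ``types`` as in the earlier sketches):

    pager = client.list_usable_subnetworks(
        request=types.ListUsableSubnetworksRequest(
            parent="projects/my-project",
            filter="networkProjectId=my-host-project",
            page_size=100,  # accepted range is 0-500, per the request above
        )
    )
    for subnet in pager:
        print(subnet.subnetwork, subnet.ip_cidr_range)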
- """ - - @property - def raw_page(self): - return self - - subnetworks = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='UsableSubnetwork', - ) - next_page_token = proto.Field( - proto.STRING, - number=2, - ) - - -class UsableSubnetworkSecondaryRange(proto.Message): - r"""Secondary IP range of a usable subnetwork. - Attributes: - range_name (str): - The name associated with this subnetwork - secondary range, used when adding an alias IP - range to a VM instance. - ip_cidr_range (str): - The range of IP addresses belonging to this - subnetwork secondary range. - status (google.container_v1beta1.types.UsableSubnetworkSecondaryRange.Status): - This field is to determine the status of the - secondary range programmably. - """ - class Status(proto.Enum): - r"""Status shows the current usage of a secondary IP range.""" - UNKNOWN = 0 - UNUSED = 1 - IN_USE_SERVICE = 2 - IN_USE_SHAREABLE_POD = 3 - IN_USE_MANAGED_POD = 4 - - range_name = proto.Field( - proto.STRING, - number=1, - ) - ip_cidr_range = proto.Field( - proto.STRING, - number=2, - ) - status = proto.Field( - proto.ENUM, - number=3, - enum=Status, - ) - - -class UsableSubnetwork(proto.Message): - r"""UsableSubnetwork resource returns the subnetwork name, its - associated network and the primary CIDR range. - - Attributes: - subnetwork (str): - Subnetwork Name. - Example: projects/my-project/regions/us- - central1/subnetworks/my-subnet - network (str): - Network Name. - Example: projects/my-project/global/networks/my- - network - ip_cidr_range (str): - The range of internal addresses that are - owned by this subnetwork. - secondary_ip_ranges (Sequence[google.container_v1beta1.types.UsableSubnetworkSecondaryRange]): - Secondary IP ranges. - status_message (str): - A human readable status message representing the reasons for - cases where the caller cannot use the secondary ranges under - the subnet. For example if the secondary_ip_ranges is empty - due to a permission issue, an insufficient permission - message will be given by status_message. - """ - - subnetwork = proto.Field( - proto.STRING, - number=1, - ) - network = proto.Field( - proto.STRING, - number=2, - ) - ip_cidr_range = proto.Field( - proto.STRING, - number=3, - ) - secondary_ip_ranges = proto.RepeatedField( - proto.MESSAGE, - number=4, - message='UsableSubnetworkSecondaryRange', - ) - status_message = proto.Field( - proto.STRING, - number=5, - ) - - -class VerticalPodAutoscaling(proto.Message): - r"""VerticalPodAutoscaling contains global, per-cluster - information required by Vertical Pod Autoscaler to automatically - adjust the resources of pods controlled by it. - - Attributes: - enabled (bool): - Enables vertical pod autoscaling. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class DefaultSnatStatus(proto.Message): - r"""DefaultSnatStatus contains the desired state of whether - default sNAT should be disabled on the cluster. - - Attributes: - disabled (bool): - Disables cluster default sNAT rules. - """ - - disabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class IntraNodeVisibilityConfig(proto.Message): - r"""IntraNodeVisibilityConfig contains the desired config of the - intra-node visibility on this cluster. - - Attributes: - enabled (bool): - Enables intra node visibility for this - cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class MaxPodsConstraint(proto.Message): - r"""Constraints applied to pods. - Attributes: - max_pods_per_node (int): - Constraint enforced on the max num of pods - per node. 
- """ - - max_pods_per_node = proto.Field( - proto.INT64, - number=1, - ) - - -class WorkloadIdentityConfig(proto.Message): - r"""Configuration for the use of Kubernetes Service Accounts in - GCP IAM policies. - - Attributes: - identity_namespace (str): - IAM Identity Namespace to attach all - Kubernetes Service Accounts to. - workload_pool (str): - The workload pool to attach all Kubernetes - service accounts to. - identity_provider (str): - identity provider is the third party identity - provider. - """ - - identity_namespace = proto.Field( - proto.STRING, - number=1, - ) - workload_pool = proto.Field( - proto.STRING, - number=2, - ) - identity_provider = proto.Field( - proto.STRING, - number=3, - ) - - -class DatabaseEncryption(proto.Message): - r"""Configuration of etcd encryption. - Attributes: - state (google.container_v1beta1.types.DatabaseEncryption.State): - Denotes the state of etcd encryption. - key_name (str): - Name of CloudKMS key to use for the - encryption of secrets in etcd. Ex. projects/my- - project/locations/global/keyRings/my- - ring/cryptoKeys/my-key - """ - class State(proto.Enum): - r"""State of etcd encryption.""" - UNKNOWN = 0 - ENCRYPTED = 1 - DECRYPTED = 2 - - state = proto.Field( - proto.ENUM, - number=2, - enum=State, - ) - key_name = proto.Field( - proto.STRING, - number=1, - ) - - -class ResourceUsageExportConfig(proto.Message): - r"""Configuration for exporting cluster resource usages. - Attributes: - bigquery_destination (google.container_v1beta1.types.ResourceUsageExportConfig.BigQueryDestination): - Configuration to use BigQuery as usage export - destination. - enable_network_egress_metering (bool): - Whether to enable network egress metering for - this cluster. If enabled, a daemonset will be - created in the cluster to meter network egress - traffic. - consumption_metering_config (google.container_v1beta1.types.ResourceUsageExportConfig.ConsumptionMeteringConfig): - Configuration to enable resource consumption - metering. - """ - - class BigQueryDestination(proto.Message): - r"""Parameters for using BigQuery as the destination of resource - usage export. - - Attributes: - dataset_id (str): - The ID of a BigQuery Dataset. - """ - - dataset_id = proto.Field( - proto.STRING, - number=1, - ) - - class ConsumptionMeteringConfig(proto.Message): - r"""Parameters for controlling consumption metering. - Attributes: - enabled (bool): - Whether to enable consumption metering for - this cluster. If enabled, a second BigQuery - table will be created to hold resource - consumption records. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - bigquery_destination = proto.Field( - proto.MESSAGE, - number=1, - message=BigQueryDestination, - ) - enable_network_egress_metering = proto.Field( - proto.BOOL, - number=2, - ) - consumption_metering_config = proto.Field( - proto.MESSAGE, - number=3, - message=ConsumptionMeteringConfig, - ) - - -class ShieldedNodes(proto.Message): - r"""Configuration of Shielded Nodes feature. - Attributes: - enabled (bool): - Whether Shielded Nodes features are enabled - on all nodes in this cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class GetOpenIDConfigRequest(proto.Message): - r"""GetOpenIDConfigRequest gets the OIDC discovery document for - the cluster. See the OpenID Connect Discovery 1.0 specification - for details. - - Attributes: - parent (str): - The cluster (project, location, cluster id) to get the - discovery document for. 
Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class GetOpenIDConfigResponse(proto.Message): - r"""GetOpenIDConfigResponse is an OIDC discovery document for the - cluster. See the OpenID Connect Discovery 1.0 specification for - details. - - Attributes: - issuer (str): - OIDC Issuer. - jwks_uri (str): - JSON Web Key uri. - response_types_supported (Sequence[str]): - Supported response types. - subject_types_supported (Sequence[str]): - Supported subject types. - id_token_signing_alg_values_supported (Sequence[str]): - supported ID Token signing Algorithms. - claims_supported (Sequence[str]): - Supported claims. - grant_types (Sequence[str]): - Supported grant types. - """ - - issuer = proto.Field( - proto.STRING, - number=1, - ) - jwks_uri = proto.Field( - proto.STRING, - number=2, - ) - response_types_supported = proto.RepeatedField( - proto.STRING, - number=3, - ) - subject_types_supported = proto.RepeatedField( - proto.STRING, - number=4, - ) - id_token_signing_alg_values_supported = proto.RepeatedField( - proto.STRING, - number=5, - ) - claims_supported = proto.RepeatedField( - proto.STRING, - number=6, - ) - grant_types = proto.RepeatedField( - proto.STRING, - number=7, - ) - - -class GetJSONWebKeysRequest(proto.Message): - r"""GetJSONWebKeysRequest gets the public component of the keys used by - the cluster to sign token requests. This will be the jwks_uri for - the discover document returned by getOpenIDConfig. See the OpenID - Connect Discovery 1.0 specification for details. - - Attributes: - parent (str): - The cluster (project, location, cluster id) to get keys for. - Specified in the format - ``projects/*/locations/*/clusters/*``. - """ - - parent = proto.Field( - proto.STRING, - number=1, - ) - - -class Jwk(proto.Message): - r"""Jwk is a JSON Web Key as specified in RFC 7517 - Attributes: - kty (str): - Key Type. - alg (str): - Algorithm. - use (str): - Permitted uses for the public keys. - kid (str): - Key ID. - n (str): - Used for RSA keys. - e (str): - Used for RSA keys. - x (str): - Used for ECDSA keys. - y (str): - Used for ECDSA keys. - crv (str): - Used for ECDSA keys. - """ - - kty = proto.Field( - proto.STRING, - number=1, - ) - alg = proto.Field( - proto.STRING, - number=2, - ) - use = proto.Field( - proto.STRING, - number=3, - ) - kid = proto.Field( - proto.STRING, - number=4, - ) - n = proto.Field( - proto.STRING, - number=5, - ) - e = proto.Field( - proto.STRING, - number=6, - ) - x = proto.Field( - proto.STRING, - number=7, - ) - y = proto.Field( - proto.STRING, - number=8, - ) - crv = proto.Field( - proto.STRING, - number=9, - ) - - -class GetJSONWebKeysResponse(proto.Message): - r"""GetJSONWebKeysResponse is a valid JSON Web Key Set as - specififed in rfc 7517 - - Attributes: - keys (Sequence[google.container_v1beta1.types.Jwk]): - The public component of the keys used by the - cluster to sign token requests. - """ - - keys = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='Jwk', - ) - - -class ReleaseChannel(proto.Message): - r"""ReleaseChannel indicates which release channel a cluster is - subscribed to. Release channels are arranged in order of risk. - When a cluster is subscribed to a release channel, Google - maintains both the master version and the node version. Node - auto-upgrade defaults to true and cannot be disabled. 
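A sketch of subscribing a cluster to a channel at creation time (release_channel is a field of the Cluster message defined earlier in this module; values are placeholders):

    cluster = types.Cluster(
        name="my-cluster",
        release_channel=types.ReleaseChannel(
            channel=types.ReleaseChannel.Channel.REGULAR,
        ),
    )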
- - Attributes: - channel (google.container_v1beta1.types.ReleaseChannel.Channel): - channel specifies which release channel the - cluster is subscribed to. - """ - class Channel(proto.Enum): - r"""Possible values for 'channel'.""" - UNSPECIFIED = 0 - RAPID = 1 - REGULAR = 2 - STABLE = 3 - - channel = proto.Field( - proto.ENUM, - number=1, - enum=Channel, - ) - - -class TpuConfig(proto.Message): - r"""Configuration for Cloud TPU. - Attributes: - enabled (bool): - Whether Cloud TPU integration is enabled or - not. - use_service_networking (bool): - Whether to use service networking for Cloud - TPU or not. - ipv4_cidr_block (str): - IPv4 CIDR block reserved for Cloud TPU in the - VPC. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - use_service_networking = proto.Field( - proto.BOOL, - number=2, - ) - ipv4_cidr_block = proto.Field( - proto.STRING, - number=3, - ) - - -class Master(proto.Message): - r"""Master is the configuration for components on master. """ - - -class NotificationConfig(proto.Message): - r"""NotificationConfig is the configuration of notifications. - Attributes: - pubsub (google.container_v1beta1.types.NotificationConfig.PubSub): - Notification config for Pub/Sub. - """ - - class PubSub(proto.Message): - r"""Pub/Sub specific notification config. - Attributes: - enabled (bool): - Enable notifications for Pub/Sub. - topic (str): - The desired Pub/Sub topic to which notifications will be - sent by GKE. Format is - ``projects/{project}/topics/{topic}``. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - topic = proto.Field( - proto.STRING, - number=2, - ) - - pubsub = proto.Field( - proto.MESSAGE, - number=1, - message=PubSub, - ) - - -class ConfidentialNodes(proto.Message): - r"""ConfidentialNodes is configuration for the confidential nodes - feature, which makes nodes run on confidential VMs. - - Attributes: - enabled (bool): - Whether Confidential Nodes feature is enabled - for all nodes in this cluster. - """ - - enabled = proto.Field( - proto.BOOL, - number=1, - ) - - -class UpgradeEvent(proto.Message): - r"""UpgradeEvent is a notification sent to customers by the - cluster server when a resource is upgrading. - - Attributes: - resource_type (google.container_v1beta1.types.UpgradeResourceType): - Required. The resource type that is - upgrading. - operation (str): - Required. The operation associated with this - upgrade. - operation_start_time (google.protobuf.timestamp_pb2.Timestamp): - Required. The time when the operation was - started. - current_version (str): - Required. The current version before the - upgrade. - target_version (str): - Required. The target version for the upgrade. - resource (str): - Optional. Optional relative path to the - resource. For example in node pool upgrades, the - relative path of the node pool. 
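UpgradeEvent payloads are delivered through the Pub/Sub topic configured via the NotificationConfig above; a sketch of enabling that (placeholder topic, which must already exist in the project; ``types`` as in the earlier sketches):

    notification = types.NotificationConfig(
        pubsub=types.NotificationConfig.PubSub(
            enabled=True,
            topic="projects/my-project/topics/gke-notifications",
        ),
    )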
- """ - - resource_type = proto.Field( - proto.ENUM, - number=1, - enum='UpgradeResourceType', - ) - operation = proto.Field( - proto.STRING, - number=2, - ) - operation_start_time = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - current_version = proto.Field( - proto.STRING, - number=4, - ) - target_version = proto.Field( - proto.STRING, - number=5, - ) - resource = proto.Field( - proto.STRING, - number=6, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1beta1/mypy.ini b/owl-bot-staging/v1beta1/mypy.ini deleted file mode 100644 index 4505b485..00000000 --- a/owl-bot-staging/v1beta1/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.6 -namespace_packages = True diff --git a/owl-bot-staging/v1beta1/noxfile.py b/owl-bot-staging/v1beta1/noxfile.py deleted file mode 100644 index b289931c..00000000 --- a/owl-bot-staging/v1beta1/noxfile.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") - - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", -] - -@nox.session(python=['3.6', '3.7', '3.8', '3.9']) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/container_v1beta1/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python='3.7') -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=['3.6', '3.7']) -def mypy(session): - """Run the type checker.""" - session.install('mypy', 'types-pkg_resources') - session.install('.') - session.run( - 'mypy', - '--explicit-package-bases', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python='3.6') -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx<3.0.0", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) diff --git a/owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py b/owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py deleted file mode 100644 index fcd6cd36..00000000 --- a/owl-bot-staging/v1beta1/scripts/fixup_container_v1beta1_keywords.py +++ /dev/null @@ -1,208 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class containerCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'cancel_operation': ('project_id', 'zone', 'operation_id', 'name', ), - 'complete_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', ), - 'create_cluster': ('project_id', 'zone', 'cluster', 'parent', ), - 'create_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool', 'parent', ), - 'delete_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), - 'delete_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'get_cluster': ('project_id', 'zone', 'cluster_id', 'name', ), - 'get_json_web_keys': ('parent', ), - 'get_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'get_operation': ('project_id', 'zone', 'operation_id', 'name', ), - 'get_server_config': ('project_id', 'zone', 'name', ), - 'list_clusters': ('project_id', 'zone', 'parent', ), - 'list_locations': ('parent', ), - 'list_node_pools': ('project_id', 'zone', 'cluster_id', 'parent', ), - 'list_operations': ('project_id', 'zone', 'parent', ), - 'list_usable_subnetworks': ('parent', 'filter', 'page_size', 'page_token', ), - 'rollback_node_pool_upgrade': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'name', ), - 'set_addons_config': ('project_id', 'zone', 'cluster_id', 'addons_config', 'name', ), - 'set_labels': ('project_id', 'zone', 'cluster_id', 'resource_labels', 'label_fingerprint', 'name', ), - 'set_legacy_abac': ('project_id', 'zone', 'cluster_id', 'enabled', 'name', ), - 'set_locations': ('project_id', 'zone', 'cluster_id', 'locations', 'name', ), - 'set_logging_service': ('project_id', 'zone', 'cluster_id', 'logging_service', 'name', ), - 'set_maintenance_policy': ('project_id', 'zone', 'cluster_id', 'maintenance_policy', 'name', ), - 'set_master_auth': ('project_id', 'zone', 'cluster_id', 'action', 'update', 'name', ), - 'set_monitoring_service': ('project_id', 'zone', 'cluster_id', 'monitoring_service', 'name', ), - 'set_network_policy': ('project_id', 'zone', 'cluster_id', 'network_policy', 'name', ), - 'set_node_pool_autoscaling': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'autoscaling', 'name', ), - 'set_node_pool_management': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'management', 'name', ), - 'set_node_pool_size': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_count', 'name', ), - 'start_ip_rotation': ('project_id', 'zone', 'cluster_id', 'name', 'rotate_credentials', ), - 'update_cluster': ('project_id', 'zone', 'cluster_id', 'update', 'name', ), - 'update_master': ('project_id', 'zone', 'cluster_id', 'master_version', 'name', ), - 'update_node_pool': ('project_id', 'zone', 'cluster_id', 'node_pool_id', 'node_version', 'image_type', 'locations', 'workload_metadata_config', 'name', 'upgrade_settings', 'linux_node_config', 'kubelet_config', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - 
except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: not a.keyword.value in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=containerCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the container client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. 
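The ``partition`` helper defined near the top of this script drives the argument classification in ``leave_Call``; its contract is easiest to see on a toy input::

    # partition returns (matching, non-matching), preserving input order.
    evens, odds = partition(lambda n: n % 2 == 0, [1, 2, 3, 4, 5])
    assert evens == [2, 4]
    assert odds == [1, 3, 5]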
-""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1beta1/setup.py b/owl-bot-staging/v1beta1/setup.py deleted file mode 100644 index eb059ee8..00000000 --- a/owl-bot-staging/v1beta1/setup.py +++ /dev/null @@ -1,54 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import setuptools # type: ignore - -version = '0.1.0' - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, 'README.rst') -with io.open(readme_filename, encoding='utf-8') as readme_file: - readme = readme_file.read() - -setuptools.setup( - name='google-container', - version=version, - long_description=readme, - packages=setuptools.PEP420PackageFinder.find(), - namespace_packages=('google',), - platforms='Posix; MacOS X; Windows', - include_package_data=True, - install_requires=( - 'google-api-core[grpc] >= 1.27.0, < 3.0.0dev', - 'libcst >= 0.2.5', - 'proto-plus >= 1.15.0', - 'packaging >= 14.3', ), - python_requires='>=3.6', - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Operating System :: OS Independent', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Topic :: Internet', - 'Topic :: Software Development :: Libraries :: Python Modules', - ], - zip_safe=False, -) diff --git a/owl-bot-staging/v1beta1/tests/__init__.py b/owl-bot-staging/v1beta1/tests/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta1/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/__init__.py b/owl-bot-staging/v1beta1/tests/unit/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py b/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py deleted file mode 100644 index b54a5fcc..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
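The ``install_requires`` pins in the ``setup.py`` above are standard PEP 440 requirement strings; for instance, the ``google-api-core`` entry parses as follows (a sketch using the ``packaging`` library, itself one of the declared dependencies)::

    from packaging.requirements import Requirement

    req = Requirement("google-api-core[grpc] >= 1.27.0, < 3.0.0dev")
    assert req.name == "google-api-core"
    assert req.extras == {"grpc"}
    # Both the lower and upper bounds land on the specifier set.
    assert len(list(req.specifier)) == 2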
-# diff --git a/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py deleted file mode 100644 index 55a4b87b..00000000 --- a/owl-bot-staging/v1beta1/tests/unit/gapic/container_v1beta1/test_cluster_manager.py +++ /dev/null @@ -1,9846 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -import mock -import packaging.version - -import grpc -from grpc.experimental import aio -import math -import pytest -from proto.marshal.rules.dates import DurationRule, TimestampRule - - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.container_v1beta1.services.cluster_manager import ClusterManagerAsyncClient -from google.container_v1beta1.services.cluster_manager import ClusterManagerClient -from google.container_v1beta1.services.cluster_manager import pagers -from google.container_v1beta1.services.cluster_manager import transports -from google.container_v1beta1.services.cluster_manager.transports.base import _GOOGLE_AUTH_VERSION -from google.container_v1beta1.types import cluster_service -from google.oauth2 import service_account -from google.protobuf import timestamp_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore -from google.rpc import code_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -import google.auth - - -# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively -# through google-api-core: -# - Delete the auth "less than" test cases -# - Delete these pytest markers (Make the "greater than or equal to" tests the default). -requires_google_auth_lt_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"), - reason="This test requires google-auth < 1.25.0", -) -requires_google_auth_gte_1_25_0 = pytest.mark.skipif( - packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"), - reason="This test requires google-auth >= 1.25.0", -) - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert ClusterManagerClient._get_default_mtls_endpoint(None) is None - assert ClusterManagerClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert ClusterManagerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - - -@pytest.mark.parametrize("client_class", [ - ClusterManagerClient, - ClusterManagerAsyncClient, -]) -def test_cluster_manager_client_from_service_account_info(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'container.googleapis.com:443' - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.ClusterManagerGrpcTransport, "grpc"), - (transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_manager_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class", [ - ClusterManagerClient, - ClusterManagerAsyncClient, -]) -def test_cluster_manager_client_from_service_account_file(client_class): - creds = ga_credentials.AnonymousCredentials() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json") - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == 'container.googleapis.com:443' - - -def test_cluster_manager_client_get_transport_class(): - transport = ClusterManagerClient.get_transport_class() - available_transports = [ - transports.ClusterManagerGrpcTransport, - ] - assert transport in available_transports - - transport = ClusterManagerClient.get_transport_class("grpc") - assert transport == transports.ClusterManagerGrpcTransport - - 
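A usage-level view of the credential behaviour pinned down by the tests above: constructing a client from a service-account file needs no extra flags, since the transport is created with ``always_use_jwt_access=True`` (the key path below is hypothetical)::

    from google.container_v1beta1 import ClusterManagerClient

    # With service-account credentials, the gRPC transport signs
    # self-signed JWTs rather than exchanging the key for an OAuth2
    # access token on each refresh.
    client = ClusterManagerClient.from_service_account_file("key.json")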
-@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) -@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) -def test_cluster_manager_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(ClusterManagerClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): - client = client_class() - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError): - client = client_class() - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "true"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc", "false"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio", "false"), -]) -@mock.patch.object(ClusterManagerClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerClient)) -@mock.patch.object(ClusterManagerAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ClusterManagerAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_cluster_manager_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - # Check the case client_cert_source and ADC client cert are not provided. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class() - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_manager_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. - options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_ENDPOINT, - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (ClusterManagerClient, transports.ClusterManagerGrpcTransport, "grpc"), - (ClusterManagerAsyncClient, transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), -]) -def test_cluster_manager_client_client_options_credentials_file(client_class, transport_class, transport_name): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_cluster_manager_client_client_options_from_dict(): - with mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = ClusterManagerClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - ) - - -def test_list_clusters(transport: str = 'grpc', request_type=cluster_service.ListClustersRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListClustersResponse( - missing_zones=['missing_zones_value'], - ) - response = client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.ListClustersResponse) - assert response.missing_zones == ['missing_zones_value'] - - -def test_list_clusters_from_dict(): - test_list_clusters(request_type=dict) - - -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListClustersRequest() - - -@pytest.mark.asyncio -async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListClustersRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse( - missing_zones=['missing_zones_value'], - )) - response = await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.ListClustersResponse) - assert response.missing_zones == ['missing_zones_value'] - - -@pytest.mark.asyncio -async def test_list_clusters_async_from_dict(): - await test_list_clusters_async(request_type=dict) - - -def test_list_clusters_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.ListClustersRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - call.return_value = cluster_service.ListClustersResponse() - client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_clusters_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.ListClustersRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse()) - await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_clusters_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListClustersResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_clusters( - project_id='project_id_value', - zone='zone_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - - -def test_list_clusters_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - cluster_service.ListClustersRequest(), - project_id='project_id_value', - zone='zone_value', - ) - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListClustersResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListClustersResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_clusters( - project_id='project_id_value', - zone='zone_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - - -@pytest.mark.asyncio -async def test_list_clusters_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_clusters( - cluster_service.ListClustersRequest(), - project_id='project_id_value', - zone='zone_value', - ) - - -def test_get_cluster(transport: str = 'grpc', request_type=cluster_service.GetClusterRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = cluster_service.Cluster( - name='name_value', - description='description_value', - initial_node_count=1911, - logging_service='logging_service_value', - monitoring_service='monitoring_service_value', - network='network_value', - cluster_ipv4_cidr='cluster_ipv4_cidr_value', - subnetwork='subnetwork_value', - locations=['locations_value'], - enable_kubernetes_alpha=True, - label_fingerprint='label_fingerprint_value', - private_cluster=True, - master_ipv4_cidr_block='master_ipv4_cidr_block_value', - self_link='self_link_value', - zone='zone_value', - endpoint='endpoint_value', - initial_cluster_version='initial_cluster_version_value', - current_master_version='current_master_version_value', - current_node_version='current_node_version_value', - create_time='create_time_value', - status=cluster_service.Cluster.Status.PROVISIONING, - status_message='status_message_value', - node_ipv4_cidr_size=1955, - services_ipv4_cidr='services_ipv4_cidr_value', - instance_group_urls=['instance_group_urls_value'], - current_node_count=1936, - expire_time='expire_time_value', - location='location_value', - enable_tpu=True, - tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value', - ) - response = client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Cluster) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.initial_node_count == 1911 - assert response.logging_service == 'logging_service_value' - assert response.monitoring_service == 'monitoring_service_value' - assert response.network == 'network_value' - assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' - assert response.subnetwork == 'subnetwork_value' - assert response.locations == ['locations_value'] - assert response.enable_kubernetes_alpha is True - assert response.label_fingerprint == 'label_fingerprint_value' - assert response.private_cluster is True - assert response.master_ipv4_cidr_block == 'master_ipv4_cidr_block_value' - assert response.self_link == 'self_link_value' - assert response.zone == 'zone_value' - assert response.endpoint == 'endpoint_value' - assert response.initial_cluster_version == 'initial_cluster_version_value' - assert response.current_master_version == 'current_master_version_value' - assert response.current_node_version == 'current_node_version_value' - assert response.create_time == 'create_time_value' - assert response.status == cluster_service.Cluster.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.node_ipv4_cidr_size == 1955 - assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.current_node_count == 1936 - assert response.expire_time == 'expire_time_value' - assert response.location == 'location_value' - assert response.enable_tpu is True - assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' - - -def test_get_cluster_from_dict(): - test_get_cluster(request_type=dict) - - -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - client.get_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetClusterRequest() - - -@pytest.mark.asyncio -async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetClusterRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster( - name='name_value', - description='description_value', - initial_node_count=1911, - logging_service='logging_service_value', - monitoring_service='monitoring_service_value', - network='network_value', - cluster_ipv4_cidr='cluster_ipv4_cidr_value', - subnetwork='subnetwork_value', - locations=['locations_value'], - enable_kubernetes_alpha=True, - label_fingerprint='label_fingerprint_value', - private_cluster=True, - master_ipv4_cidr_block='master_ipv4_cidr_block_value', - self_link='self_link_value', - zone='zone_value', - endpoint='endpoint_value', - initial_cluster_version='initial_cluster_version_value', - current_master_version='current_master_version_value', - current_node_version='current_node_version_value', - create_time='create_time_value', - status=cluster_service.Cluster.Status.PROVISIONING, - status_message='status_message_value', - node_ipv4_cidr_size=1955, - services_ipv4_cidr='services_ipv4_cidr_value', - instance_group_urls=['instance_group_urls_value'], - current_node_count=1936, - expire_time='expire_time_value', - location='location_value', - enable_tpu=True, - tpu_ipv4_cidr_block='tpu_ipv4_cidr_block_value', - )) - response = await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetClusterRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, cluster_service.Cluster) - assert response.name == 'name_value' - assert response.description == 'description_value' - assert response.initial_node_count == 1911 - assert response.logging_service == 'logging_service_value' - assert response.monitoring_service == 'monitoring_service_value' - assert response.network == 'network_value' - assert response.cluster_ipv4_cidr == 'cluster_ipv4_cidr_value' - assert response.subnetwork == 'subnetwork_value' - assert response.locations == ['locations_value'] - assert response.enable_kubernetes_alpha is True - assert response.label_fingerprint == 'label_fingerprint_value' - assert response.private_cluster is True - assert response.master_ipv4_cidr_block == 'master_ipv4_cidr_block_value' - assert response.self_link == 'self_link_value' - assert response.zone == 'zone_value' - assert response.endpoint == 'endpoint_value' - assert response.initial_cluster_version == 'initial_cluster_version_value' - assert response.current_master_version == 'current_master_version_value' - assert response.current_node_version == 'current_node_version_value' - assert response.create_time == 'create_time_value' - assert response.status == cluster_service.Cluster.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.node_ipv4_cidr_size == 1955 - assert response.services_ipv4_cidr == 'services_ipv4_cidr_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.current_node_count == 1936 - assert response.expire_time == 'expire_time_value' - assert response.location == 'location_value' - assert response.enable_tpu is True - assert response.tpu_ipv4_cidr_block == 'tpu_ipv4_cidr_block_value' - - -@pytest.mark.asyncio -async def test_get_cluster_async_from_dict(): - await test_get_cluster_async(request_type=dict) - - -def test_get_cluster_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - call.return_value = cluster_service.Cluster() - client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_cluster_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster()) - await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_cluster_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Cluster() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_cluster( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - - -def test_get_cluster_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cluster( - cluster_service.GetClusterRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Cluster() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Cluster()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_cluster( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - - -@pytest.mark.asyncio -async def test_get_cluster_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_cluster( - cluster_service.GetClusterRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - -def test_create_cluster(transport: str = 'grpc', request_type=cluster_service.CreateClusterRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_create_cluster_from_dict(): - test_create_cluster(request_type=dict) - - -def test_create_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - client.create_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateClusterRequest() - - -@pytest.mark.asyncio -async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateClusterRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_create_cluster_async_from_dict(): - await test_create_cluster_async(request_type=dict) - - -def test_create_cluster_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CreateClusterRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_cluster_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CreateClusterRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_cluster_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
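# The field-header tests above check that values which would appear in the
# HTTP/1.1 URI are mirrored into the x-goog-request-params metadata entry,
# which the backend uses for routing. A sketch of how such an entry can be
# assembled; real clients build it with google.api_core helpers, so treat
# this as illustrative only:

def make_routing_metadata(**params):
    # Join key=value pairs with '&', matching the 'parent=parent/value'
    # form asserted against kw['metadata'] above.
    value = "&".join(f"{key}={val}" for key, val in params.items())
    return ("x-goog-request-params", value)

# make_routing_metadata(parent="parent/value")
# -> ('x-goog-request-params', 'parent=parent/value')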
- client.create_cluster( - project_id='project_id_value', - zone='zone_value', - cluster=cluster_service.Cluster(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster == cluster_service.Cluster(name='name_value') - - -def test_create_cluster_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - cluster_service.CreateClusterRequest(), - project_id='project_id_value', - zone='zone_value', - cluster=cluster_service.Cluster(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_cluster( - project_id='project_id_value', - zone='zone_value', - cluster=cluster_service.Cluster(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster == cluster_service.Cluster(name='name_value') - - -@pytest.mark.asyncio -async def test_create_cluster_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_cluster( - cluster_service.CreateClusterRequest(), - project_id='project_id_value', - zone='zone_value', - cluster=cluster_service.Cluster(name='name_value'), - ) - - -def test_update_cluster(transport: str = 'grpc', request_type=cluster_service.UpdateClusterRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.UpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_update_cluster_from_dict(): - test_update_cluster(request_type=dict) - - -def test_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - client.update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.UpdateClusterRequest() - - -@pytest.mark.asyncio -async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateClusterRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.UpdateClusterRequest() - - # Establish that the response is the type that we expect. 
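# The *_empty_call tests above depend on the client treating a missing
# request as "build the default request type": every proto3 message has a
# valid zero value. A one-line sketch of that coercion, with hypothetical
# names:

def coerce_request(request, request_type):
    # An absent request can safely become an empty instance of the
    # expected type, since all proto3 fields have defaults.
    return request_type() if request is None else request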
- assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_update_cluster_async_from_dict(): - await test_update_cluster_async(request_type=dict) - - -def test_update_cluster_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_cluster_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateClusterRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_update_cluster_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_cluster( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
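# The unpacking pattern used throughout these tests -- `_, args, _ =
# call.mock_calls[0]` and `_, _, kw = call.mock_calls[0]` -- works because
# unittest.mock records each mock_calls entry as a (name, args, kwargs)
# triple. A self-contained illustration of the same idiom:

from unittest import mock

def demo_mock_calls():
    stub = mock.Mock()
    stub("payload", metadata=[("x-goog-request-params", "name=name/value")])
    name, args, kwargs = stub.mock_calls[0]
    assert args[0] == "payload"
    assert ("x-goog-request-params", "name=name/value") in kwargs["metadata"]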
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value') - - -def test_update_cluster_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_cluster( - cluster_service.UpdateClusterRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), - ) - - -@pytest.mark.asyncio -async def test_update_cluster_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_cluster( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].update == cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value') - - -@pytest.mark.asyncio -async def test_update_cluster_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.update_cluster( - cluster_service.UpdateClusterRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - update=cluster_service.ClusterUpdate(desired_node_version='desired_node_version_value'), - ) - - -def test_update_node_pool(transport: str = 'grpc', request_type=cluster_service.UpdateNodePoolRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.update_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.UpdateNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_update_node_pool_from_dict(): - test_update_node_pool(request_type=dict) - - -def test_update_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - client.update_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.UpdateNodePoolRequest() - - -@pytest.mark.asyncio -async def test_update_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.update_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.UpdateNodePoolRequest() - - # Establish that the response is the type that we expect. 
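# The *_from_dict variants re-run each test with request_type=dict because
# proto-plus request classes accept a mapping with the same field names as
# the message, e.g. (illustrative):
#
#     request = cluster_service.UpdateNodePoolRequest(
#         {'project_id': 'project_id_value', 'zone': 'zone_value'}
#     )
#
# which is equivalent to passing those fields as keyword arguments; the
# client coerces the dict to the request type before sending.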
- assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_update_node_pool_async_from_dict(): - await test_update_node_pool_async(request_type=dict) - - -def test_update_node_pool_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.update_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_node_pool_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.UpdateNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_node_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.update_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_node_pool_autoscaling(transport: str = 'grpc', request_type=cluster_service.SetNodePoolAutoscalingRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_node_pool_autoscaling(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_node_pool_autoscaling_from_dict(): - test_set_node_pool_autoscaling(request_type=dict) - - -def test_set_node_pool_autoscaling_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - client.set_node_pool_autoscaling() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() - - -@pytest.mark.asyncio -async def test_set_node_pool_autoscaling_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolAutoscalingRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.set_node_pool_autoscaling(request) - - # Establish that the underlying gRPC stub method was called. 
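# Note that the mutating RPCs in this surface return a
# cluster_service.Operation snapshot (stubbed here with status=PENDING)
# rather than the finished resource. A caller-side polling sketch, built
# on the get_operation method seen elsewhere in this file and assuming the
# Status enum also defines DONE; the request is passed as a dict for
# brevity:

import time

def wait_for_operation(client, operation_name, poll_seconds=5.0):
    # Re-fetch the Operation until the server reports a terminal status.
    while True:
        op = client.get_operation({"name": operation_name})
        if op.status == type(op).Status.DONE:
            return op
        time.sleep(poll_seconds)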
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolAutoscalingRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_node_pool_autoscaling_async_from_dict(): - await test_set_node_pool_autoscaling_async(request_type=dict) - - -def test_set_node_pool_autoscaling_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolAutoscalingRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_node_pool_autoscaling(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_node_pool_autoscaling_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolAutoscalingRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_autoscaling), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_node_pool_autoscaling(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_logging_service(transport: str = 'grpc', request_type=cluster_service.SetLoggingServiceRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_logging_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetLoggingServiceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_logging_service_from_dict(): - test_set_logging_service(request_type=dict) - - -def test_set_logging_service_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - client.set_logging_service() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetLoggingServiceRequest() - - -@pytest.mark.asyncio -async def test_set_logging_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLoggingServiceRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.set_logging_service(request) - - # Establish that the underlying gRPC stub method was called. 
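# Every test here builds its client with ga_credentials.AnonymousCredentials()
# so that no Application Default Credentials lookup or token refresh runs
# while the transport is mocked out. The same trick applies to any offline
# test of a google-cloud client:

from google.auth import credentials as ga_credentials

def make_offline_client(client_cls):
    # Anonymous credentials satisfy the constructor without touching the
    # network or the local ADC configuration.
    return client_cls(credentials=ga_credentials.AnonymousCredentials())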
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetLoggingServiceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_logging_service_async_from_dict(): - await test_set_logging_service_async(request_type=dict) - - -def test_set_logging_service_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetLoggingServiceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_logging_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_logging_service_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetLoggingServiceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_logging_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_logging_service_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.set_logging_service( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - logging_service='logging_service_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].logging_service == 'logging_service_value' - - -def test_set_logging_service_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_logging_service( - cluster_service.SetLoggingServiceRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - logging_service='logging_service_value', - ) - - -@pytest.mark.asyncio -async def test_set_logging_service_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_logging_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_logging_service( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - logging_service='logging_service_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].logging_service == 'logging_service_value' - - -@pytest.mark.asyncio -async def test_set_logging_service_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_logging_service( - cluster_service.SetLoggingServiceRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - logging_service='logging_service_value', - ) - - -def test_set_monitoring_service(transport: str = 'grpc', request_type=cluster_service.SetMonitoringServiceRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_monitoring_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetMonitoringServiceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_monitoring_service_from_dict(): - test_set_monitoring_service(request_type=dict) - - -def test_set_monitoring_service_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - client.set_monitoring_service() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetMonitoringServiceRequest() - - -@pytest.mark.asyncio -async def test_set_monitoring_service_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMonitoringServiceRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.set_monitoring_service(request) - - # Establish that the underlying gRPC stub method was called. 
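# The hand-written *_from_dict wrappers simply re-invoke each test with a
# different request_type. The same coverage could be expressed with
# pytest's parametrization; a stand-alone sketch (FakeRequest is a
# placeholder, not a type from this package):

import pytest

class FakeRequest:
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

@pytest.mark.parametrize("request_type", [FakeRequest, dict])
def test_request_shapes(request_type):
    # Both shapes produce an "empty" request the client can coerce.
    assert request_type() is not None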
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetMonitoringServiceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_monitoring_service_async_from_dict(): - await test_set_monitoring_service_async(request_type=dict) - - -def test_set_monitoring_service_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetMonitoringServiceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_monitoring_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_monitoring_service_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetMonitoringServiceRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_monitoring_service(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_monitoring_service_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.set_monitoring_service( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - monitoring_service='monitoring_service_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].monitoring_service == 'monitoring_service_value' - - -def test_set_monitoring_service_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_monitoring_service( - cluster_service.SetMonitoringServiceRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - monitoring_service='monitoring_service_value', - ) - - -@pytest.mark.asyncio -async def test_set_monitoring_service_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_monitoring_service), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_monitoring_service( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - monitoring_service='monitoring_service_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].monitoring_service == 'monitoring_service_value' - - -@pytest.mark.asyncio -async def test_set_monitoring_service_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_monitoring_service( - cluster_service.SetMonitoringServiceRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - monitoring_service='monitoring_service_value', - ) - - -def test_set_addons_config(transport: str = 'grpc', request_type=cluster_service.SetAddonsConfigRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_addons_config), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_addons_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetAddonsConfigRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_addons_config_from_dict(): - test_set_addons_config(request_type=dict) - - -def test_set_addons_config_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_addons_config), - '__call__') as call: - client.set_addons_config() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetAddonsConfigRequest() - - -@pytest.mark.asyncio -async def test_set_addons_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetAddonsConfigRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_addons_config), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.set_addons_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetAddonsConfigRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_set_addons_config_async_from_dict(): - await test_set_addons_config_async(request_type=dict) - - -def test_set_addons_config_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetAddonsConfigRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_addons_config), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.set_addons_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_addons_config_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetAddonsConfigRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_addons_config), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_addons_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_addons_config_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_addons_config), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_addons_config( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
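# The set_addons_config assertions above compare whole nested messages:
# AddonsConfig(http_load_balancing=HttpLoadBalancing(disabled=True)).
# proto-plus also accepts nested dicts for the same structure, which can
# read more clearly in tests (illustrative):
#
#     addons_config = cluster_service.AddonsConfig(
#         {'http_load_balancing': {'disabled': True}}
#     )
#
# Either spelling builds an equal message, which is what the flattened
# field assertions rely on.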
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True))
-
-
-def test_set_addons_config_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_addons_config(
-            cluster_service.SetAddonsConfigRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_addons_config),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_addons_config(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].addons_config == cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True))
-
-
-@pytest.mark.asyncio
-async def test_set_addons_config_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_addons_config(
-            cluster_service.SetAddonsConfigRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            addons_config=cluster_service.AddonsConfig(http_load_balancing=cluster_service.HttpLoadBalancing(disabled=True)),
-        )
-
-
-def test_set_locations(transport: str = 'grpc', request_type=cluster_service.SetLocationsRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLocationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_locations_from_dict():
-    test_set_locations(request_type=dict)
-
-
-def test_set_locations_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        client.set_locations()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLocationsRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_locations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLocationsRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLocationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_locations_async_from_dict():
-    await test_set_locations_async(request_type=dict)
-
-
-def test_set_locations_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLocationsRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_locations_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLocationsRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_locations_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_locations(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].locations == ['locations_value']
-
-
-def test_set_locations_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_locations(
-            cluster_service.SetLocationsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_locations_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_locations(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].locations == ['locations_value']
-
-
-@pytest.mark.asyncio
-async def test_set_locations_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_locations(
-            cluster_service.SetLocationsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            locations=['locations_value'],
-        )
-
-
-def test_update_master(transport: str = 'grpc', request_type=cluster_service.UpdateMasterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateMasterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_update_master_from_dict():
-    test_update_master(request_type=dict)
-
-
-def test_update_master_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        client.update_master()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateMasterRequest()
-
-
-@pytest.mark.asyncio
-async def test_update_master_async(transport: str = 'grpc_asyncio', request_type=cluster_service.UpdateMasterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.UpdateMasterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_update_master_async_from_dict():
-    await test_update_master_async(request_type=dict)
-
-
-def test_update_master_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.UpdateMasterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_update_master_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.UpdateMasterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.update_master(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_update_master_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.update_master(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].master_version == 'master_version_value'
-
-
-def test_update_master_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.update_master(
-            cluster_service.UpdateMasterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_update_master_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.update_master),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.update_master(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].master_version == 'master_version_value'
-
-
-@pytest.mark.asyncio
-async def test_update_master_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.update_master(
-            cluster_service.UpdateMasterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            master_version='master_version_value',
-        )
-
-
-def test_set_master_auth(transport: str = 'grpc', request_type=cluster_service.SetMasterAuthRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMasterAuthRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_master_auth_from_dict():
-    test_set_master_auth(request_type=dict)
-
-
-def test_set_master_auth_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        client.set_master_auth()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMasterAuthRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_master_auth_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMasterAuthRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMasterAuthRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_master_auth_async_from_dict():
-    await test_set_master_auth_async(request_type=dict)
-
-
-def test_set_master_auth_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMasterAuthRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_master_auth_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMasterAuthRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_master_auth),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_master_auth(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_cluster(transport: str = 'grpc', request_type=cluster_service.DeleteClusterRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.DeleteClusterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_delete_cluster_from_dict():
-    test_delete_cluster(request_type=dict)
-
-
-def test_delete_cluster_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        client.delete_cluster()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.DeleteClusterRequest()
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteClusterRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.DeleteClusterRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_async_from_dict():
-    await test_delete_cluster_async(request_type=dict)
-
-
-def test_delete_cluster_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.DeleteClusterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.DeleteClusterRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.delete_cluster(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_delete_cluster_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-
-
-def test_delete_cluster_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_cluster(
-            cluster_service.DeleteClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_cluster),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_cluster(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_cluster_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_cluster(
-            cluster_service.DeleteClusterRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-
-def test_list_operations(transport: str = 'grpc', request_type=cluster_service.ListOperationsRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListOperationsResponse(
-            missing_zones=['missing_zones_value'],
-        )
-        response = client.list_operations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListOperationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.ListOperationsResponse)
-    assert response.missing_zones == ['missing_zones_value']
-
-
-def test_list_operations_from_dict():
-    test_list_operations(request_type=dict)
-
-
-def test_list_operations_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        client.list_operations()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListOperationsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_operations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListOperationsRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse(
-            missing_zones=['missing_zones_value'],
-        ))
-        response = await client.list_operations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListOperationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.ListOperationsResponse)
-    assert response.missing_zones == ['missing_zones_value']
-
-
-@pytest.mark.asyncio
-async def test_list_operations_async_from_dict():
-    await test_list_operations_async(request_type=dict)
-
-
-def test_list_operations_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListOperationsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        call.return_value = cluster_service.ListOperationsResponse()
-        client.list_operations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_operations_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListOperationsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
-        await client.list_operations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_operations_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListOperationsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_operations(
-            project_id='project_id_value',
-            zone='zone_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-
-
-def test_list_operations_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_operations(
-            cluster_service.ListOperationsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_operations_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_operations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListOperationsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_operations(
-            project_id='project_id_value',
-            zone='zone_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-
-
-@pytest.mark.asyncio
-async def test_list_operations_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_operations(
-            cluster_service.ListOperationsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-        )
-
-
-def test_get_operation(transport: str = 'grpc', request_type=cluster_service.GetOperationRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.get_operation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.GetOperationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_get_operation_from_dict():
-    test_get_operation(request_type=dict)
-
-
-def test_get_operation_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        client.get_operation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.GetOperationRequest()
-
-
-@pytest.mark.asyncio
-async def test_get_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetOperationRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.get_operation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.GetOperationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_get_operation_async_from_dict():
-    await test_get_operation_async(request_type=dict)
-
-
-def test_get_operation_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.GetOperationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.get_operation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_get_operation_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.GetOperationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.get_operation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_get_operation_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.get_operation(
-            project_id='project_id_value',
-            zone='zone_value',
-            operation_id='operation_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].operation_id == 'operation_id_value'
-
-
-def test_get_operation_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.get_operation(
-            cluster_service.GetOperationRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            operation_id='operation_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_get_operation_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.get_operation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.get_operation(
-            project_id='project_id_value',
-            zone='zone_value',
-            operation_id='operation_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].operation_id == 'operation_id_value'
-
-
-@pytest.mark.asyncio
-async def test_get_operation_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.get_operation(
-            cluster_service.GetOperationRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            operation_id='operation_id_value',
-        )
-
-
-def test_cancel_operation(transport: str = 'grpc', request_type=cluster_service.CancelOperationRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.cancel_operation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = None
-        response = client.cancel_operation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CancelOperationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert response is None
-
-
-def test_cancel_operation_from_dict():
-    test_cancel_operation(request_type=dict)
-
-
-def test_cancel_operation_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
- client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - client.cancel_operation() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CancelOperationRequest() - - -@pytest.mark.asyncio -async def test_cancel_operation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CancelOperationRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CancelOperationRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_cancel_operation_async_from_dict(): - await test_cancel_operation_async(request_type=dict) - - -def test_cancel_operation_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CancelOperationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - call.return_value = None - client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_cancel_operation_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CancelOperationRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.cancel_operation(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
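The metadata check just below pins down the request-routing convention: the routed field is serialized into the x-goog-request-params metadata entry as key=value. A hypothetical helper showing the shape of that header value (illustrative only; the generated client assembles it internally):

    def routing_params(**fields):
        # routing_params(name='name/value') -> 'name=name/value'
        return '&'.join('{}={}'.format(key, value) for key, value in fields.items())

    metadata = [('x-goog-request-params', routing_params(name='name/value'))]
    assert ('x-goog-request-params', 'name=name/value') in metadata
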
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_cancel_operation_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.cancel_operation( - project_id='project_id_value', - zone='zone_value', - operation_id='operation_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].operation_id == 'operation_id_value' - - -def test_cancel_operation_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.cancel_operation( - cluster_service.CancelOperationRequest(), - project_id='project_id_value', - zone='zone_value', - operation_id='operation_id_value', - ) - - -@pytest.mark.asyncio -async def test_cancel_operation_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.cancel_operation), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.cancel_operation( - project_id='project_id_value', - zone='zone_value', - operation_id='operation_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].operation_id == 'operation_id_value' - - -@pytest.mark.asyncio -async def test_cancel_operation_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.cancel_operation( - cluster_service.CancelOperationRequest(), - project_id='project_id_value', - zone='zone_value', - operation_id='operation_id_value', - ) - - -def test_get_server_config(transport: str = 'grpc', request_type=cluster_service.GetServerConfigRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ServerConfig( - default_cluster_version='default_cluster_version_value', - valid_node_versions=['valid_node_versions_value'], - default_image_type='default_image_type_value', - valid_image_types=['valid_image_types_value'], - valid_master_versions=['valid_master_versions_value'], - ) - response = client.get_server_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetServerConfigRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.ServerConfig) - assert response.default_cluster_version == 'default_cluster_version_value' - assert response.valid_node_versions == ['valid_node_versions_value'] - assert response.default_image_type == 'default_image_type_value' - assert response.valid_image_types == ['valid_image_types_value'] - assert response.valid_master_versions == ['valid_master_versions_value'] - - -def test_get_server_config_from_dict(): - test_get_server_config(request_type=dict) - - -def test_get_server_config_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - client.get_server_config() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetServerConfigRequest() - - -@pytest.mark.asyncio -async def test_get_server_config_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetServerConfigRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig( - default_cluster_version='default_cluster_version_value', - valid_node_versions=['valid_node_versions_value'], - default_image_type='default_image_type_value', - valid_image_types=['valid_image_types_value'], - valid_master_versions=['valid_master_versions_value'], - )) - response = await client.get_server_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetServerConfigRequest() - - # Establish that the response is the type that we expect. 
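The response assertions that follow rely on proto-plus value semantics: repeated string fields compare equal to plain Python lists, and two messages are equal when all their fields are. Assuming the generated google.container_v1 package is importable:

    from google.container_v1.types import cluster_service

    config = cluster_service.ServerConfig(valid_node_versions=['1.20', '1.21'])
    # Repeated fields behave like, and compare equal to, plain lists.
    assert config.valid_node_versions == ['1.20', '1.21']
    # Messages compare by value, so two default requests are equal.
    assert (cluster_service.GetServerConfigRequest() ==
            cluster_service.GetServerConfigRequest())
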
- assert isinstance(response, cluster_service.ServerConfig) - assert response.default_cluster_version == 'default_cluster_version_value' - assert response.valid_node_versions == ['valid_node_versions_value'] - assert response.default_image_type == 'default_image_type_value' - assert response.valid_image_types == ['valid_image_types_value'] - assert response.valid_master_versions == ['valid_master_versions_value'] - - -@pytest.mark.asyncio -async def test_get_server_config_async_from_dict(): - await test_get_server_config_async(request_type=dict) - - -def test_get_server_config_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetServerConfigRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - call.return_value = cluster_service.ServerConfig() - client.get_server_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_server_config_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetServerConfigRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig()) - await client.get_server_config(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_server_config_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ServerConfig() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_server_config( - project_id='project_id_value', - zone='zone_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - - -def test_get_server_config_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_server_config( - cluster_service.GetServerConfigRequest(), - project_id='project_id_value', - zone='zone_value', - ) - - -@pytest.mark.asyncio -async def test_get_server_config_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_server_config), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ServerConfig() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ServerConfig()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_server_config( - project_id='project_id_value', - zone='zone_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - - -@pytest.mark.asyncio -async def test_get_server_config_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_server_config( - cluster_service.GetServerConfigRequest(), - project_id='project_id_value', - zone='zone_value', - ) - - -def test_list_node_pools(transport: str = 'grpc', request_type=cluster_service.ListNodePoolsRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListNodePoolsResponse( - ) - response = client.list_node_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListNodePoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.ListNodePoolsResponse) - - -def test_list_node_pools_from_dict(): - test_list_node_pools(request_type=dict) - - -def test_list_node_pools_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - client.list_node_pools() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListNodePoolsRequest() - - -@pytest.mark.asyncio -async def test_list_node_pools_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListNodePoolsRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse( - )) - response = await client.list_node_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.ListNodePoolsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.ListNodePoolsResponse) - - -@pytest.mark.asyncio -async def test_list_node_pools_async_from_dict(): - await test_list_node_pools_async(request_type=dict) - - -def test_list_node_pools_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.ListNodePoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - call.return_value = cluster_service.ListNodePoolsResponse() - client.list_node_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_node_pools_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.ListNodePoolsRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse()) - await client.list_node_pools(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
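Throughout the async variants, grpc_helpers_async.FakeUnaryUnaryCall wraps each canned response so the mocked transport method can be awaited. A minimal awaitable stand-in with the same observable behavior might look like this (a sketch, not the api_core implementation):

    import asyncio

    class FakeCall:
        # Awaitable wrapper that resolves to a pre-baked response.
        def __init__(self, response):
            self._response = response

        def __await__(self):
            yield from ()  # behave like an already-completed call
            return self._response

    async def main():
        assert await FakeCall('response') == 'response'

    asyncio.run(main())
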
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_list_node_pools_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListNodePoolsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_node_pools( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - - -def test_list_node_pools_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_node_pools( - cluster_service.ListNodePoolsRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - -@pytest.mark.asyncio -async def test_list_node_pools_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_node_pools), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.ListNodePoolsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListNodePoolsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_node_pools( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - - -@pytest.mark.asyncio -async def test_list_node_pools_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_node_pools( - cluster_service.ListNodePoolsRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - -def test_get_json_web_keys(transport: str = 'grpc', request_type=cluster_service.GetJSONWebKeysRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.GetJSONWebKeysResponse( - ) - response = client.get_json_web_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetJSONWebKeysRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.GetJSONWebKeysResponse) - - -def test_get_json_web_keys_from_dict(): - test_get_json_web_keys(request_type=dict) - - -def test_get_json_web_keys_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - client.get_json_web_keys() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetJSONWebKeysRequest() - - -@pytest.mark.asyncio -async def test_get_json_web_keys_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetJSONWebKeysRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse( - )) - response = await client.get_json_web_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetJSONWebKeysRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.GetJSONWebKeysResponse) - - -@pytest.mark.asyncio -async def test_get_json_web_keys_async_from_dict(): - await test_get_json_web_keys_async(request_type=dict) - - -def test_get_json_web_keys_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetJSONWebKeysRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - call.return_value = cluster_service.GetJSONWebKeysResponse() - client.get_json_web_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_json_web_keys_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetJSONWebKeysRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_json_web_keys), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.GetJSONWebKeysResponse()) - await client.get_json_web_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_get_node_pool(transport: str = 'grpc', request_type=cluster_service.GetNodePoolRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.NodePool( - name='name_value', - initial_node_count=1911, - locations=['locations_value'], - self_link='self_link_value', - version='version_value', - instance_group_urls=['instance_group_urls_value'], - status=cluster_service.NodePool.Status.PROVISIONING, - status_message='status_message_value', - pod_ipv4_cidr_size=1856, - ) - response = client.get_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.NodePool) - assert response.name == 'name_value' - assert response.initial_node_count == 1911 - assert response.locations == ['locations_value'] - assert response.self_link == 'self_link_value' - assert response.version == 'version_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.status == cluster_service.NodePool.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.pod_ipv4_cidr_size == 1856 - - -def test_get_node_pool_from_dict(): - test_get_node_pool(request_type=dict) - - -def test_get_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - client.get_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetNodePoolRequest() - - -@pytest.mark.asyncio -async def test_get_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.GetNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool( - name='name_value', - initial_node_count=1911, - locations=['locations_value'], - self_link='self_link_value', - version='version_value', - instance_group_urls=['instance_group_urls_value'], - status=cluster_service.NodePool.Status.PROVISIONING, - status_message='status_message_value', - pod_ipv4_cidr_size=1856, - )) - response = await client.get_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.GetNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.NodePool) - assert response.name == 'name_value' - assert response.initial_node_count == 1911 - assert response.locations == ['locations_value'] - assert response.self_link == 'self_link_value' - assert response.version == 'version_value' - assert response.instance_group_urls == ['instance_group_urls_value'] - assert response.status == cluster_service.NodePool.Status.PROVISIONING - assert response.status_message == 'status_message_value' - assert response.pod_ipv4_cidr_size == 1856 - - -@pytest.mark.asyncio -async def test_get_node_pool_async_from_dict(): - await test_get_node_pool_async(request_type=dict) - - -def test_get_node_pool_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.GetNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - call.return_value = cluster_service.NodePool() - client.get_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_node_pool_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = cluster_service.GetNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool()) - await client.get_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_get_node_pool_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.NodePool() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_node_pool( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].node_pool_id == 'node_pool_id_value' - - -def test_get_node_pool_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_node_pool( - cluster_service.GetNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - -@pytest.mark.asyncio -async def test_get_node_pool_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.NodePool() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.NodePool()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_node_pool( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
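The field-by-field assertions that follow verify that each flattened keyword argument was copied onto a single request proto before the transport was invoked. Conceptually, the client does something like this (a simplified sketch with a hypothetical build_request helper, assuming the generated package is importable):

    from google.container_v1.types import cluster_service

    def build_request(**flattened):
        request = cluster_service.GetNodePoolRequest()
        for field, value in flattened.items():
            if value is not None:
                setattr(request, field, value)
        return request

    request = build_request(project_id='project_id_value', zone='zone_value',
                            cluster_id='cluster_id_value',
                            node_pool_id='node_pool_id_value')
    assert request.project_id == 'project_id_value'
    assert request.node_pool_id == 'node_pool_id_value'
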
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].node_pool_id == 'node_pool_id_value' - - -@pytest.mark.asyncio -async def test_get_node_pool_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_node_pool( - cluster_service.GetNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - -def test_create_node_pool(transport: str = 'grpc', request_type=cluster_service.CreateNodePoolRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.create_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_create_node_pool_from_dict(): - test_create_node_pool(request_type=dict) - - -def test_create_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - client.create_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateNodePoolRequest() - - -@pytest.mark.asyncio -async def test_create_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CreateNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.create_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.CreateNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_create_node_pool_async_from_dict(): - await test_create_node_pool_async(request_type=dict) - - -def test_create_node_pool_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CreateNodePoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.create_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_node_pool_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.CreateNodePoolRequest() - - request.parent = 'parent/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.create_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent/value', - ) in kw['metadata'] - - -def test_create_node_pool_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_node_pool( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool=cluster_service.NodePool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].node_pool == cluster_service.NodePool(name='name_value') - - -def test_create_node_pool_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_node_pool( - cluster_service.CreateNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool=cluster_service.NodePool(name='name_value'), - ) - - -@pytest.mark.asyncio -async def test_create_node_pool_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_node_pool( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool=cluster_service.NodePool(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].node_pool == cluster_service.NodePool(name='name_value') - - -@pytest.mark.asyncio -async def test_create_node_pool_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_node_pool( - cluster_service.CreateNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool=cluster_service.NodePool(name='name_value'), - ) - - -def test_delete_node_pool(transport: str = 'grpc', request_type=cluster_service.DeleteNodePoolRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.delete_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.DeleteNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_delete_node_pool_from_dict(): - test_delete_node_pool(request_type=dict) - - -def test_delete_node_pool_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - client.delete_node_pool() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.DeleteNodePoolRequest() - - -@pytest.mark.asyncio -async def test_delete_node_pool_async(transport: str = 'grpc_asyncio', request_type=cluster_service.DeleteNodePoolRequest): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - )) - response = await client.delete_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.DeleteNodePoolRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -@pytest.mark.asyncio -async def test_delete_node_pool_async_from_dict(): - await test_delete_node_pool_async(request_type=dict) - - -def test_delete_node_pool_field_headers(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.DeleteNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - call.return_value = cluster_service.Operation() - client.delete_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_node_pool_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.DeleteNodePoolRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.delete_node_pool(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_delete_node_pool_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_node_pool( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].node_pool_id == 'node_pool_id_value' - - -def test_delete_node_pool_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_node_pool( - cluster_service.DeleteNodePoolRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - -@pytest.mark.asyncio -async def test_delete_node_pool_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_node_pool), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_node_pool( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - node_pool_id='node_pool_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
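These flattened-call checks are paired, for every RPC, with a *_flattened_error variant like the one just below. The contract those tests pin down is a guard of roughly this shape at the top of each generated method (a sketch, not the verbatim generated code):

    def rollback_node_pool_upgrade(request=None, *, project_id=None, zone=None,
                                   cluster_id=None, node_pool_id=None):
        # Mixing a full request object with flattened fields is ambiguous,
        # so the client rejects the combination outright.
        has_flattened_params = any([project_id, zone, cluster_id, node_pool_id])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')
        return request

    try:
        rollback_node_pool_upgrade(object(), project_id='project_id_value')
    except ValueError:
        pass  # expected: request object and flattened field were mixed
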
-def test_delete_node_pool_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.DeleteNodePoolRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_node_pool),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.delete_node_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.DeleteNodePoolRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_node_pool),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.delete_node_pool(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
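The field-header tests pin down the routing contract: any request field that is bound into the HTTP/1.1 URI must also travel as the x-goog-request-params metadata entry so the backend can route the call without parsing the body. A hedged sketch of the metadata shape being asserted (hand-rolled formatting for illustration; the real clients build this via google.api_core helpers):

    def routing_metadata(**params):
        # Each URI-bound field becomes a key=value pair; pairs join with '&'.
        value = '&'.join('{}={}'.format(k, v) for k, v in sorted(params.items()))
        return ('x-goog-request-params', value)

    assert routing_metadata(name='name/value') == (
        'x-goog-request-params',
        'name=name/value',
    )
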
-def test_delete_node_pool_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_node_pool),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.delete_node_pool(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].node_pool_id == 'node_pool_id_value'
-
-
-def test_delete_node_pool_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_node_pool(
-            cluster_service.DeleteNodePoolRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.delete_node_pool),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.delete_node_pool(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].node_pool_id == 'node_pool_id_value'
-
-
-@pytest.mark.asyncio
-async def test_delete_node_pool_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.delete_node_pool(
-            cluster_service.DeleteNodePoolRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-
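Every *_flattened_error test exercises the same contract from the other side: a fully-formed request object and individual flattened field arguments are mutually exclusive. A minimal sketch of a guard with that behavior (the helper and its name are hypothetical; the generated clients raise ValueError under the same condition):

    def check_exclusive(request, **flattened):
        has_flattened = any(value is not None for value in flattened.values())
        if request is not None and has_flattened:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

    check_exclusive(None, project_id='project_id_value')  # flattened only: fine
    check_exclusive(object())                             # request only: fine
    try:
        check_exclusive(object(), project_id='project_id_value')
    except ValueError:
        pass  # both set: rejected, which is what *_flattened_error asserts
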
-def test_rollback_node_pool_upgrade(transport: str = 'grpc', request_type=cluster_service.RollbackNodePoolUpgradeRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.rollback_node_pool_upgrade(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_rollback_node_pool_upgrade_from_dict():
-    test_rollback_node_pool_upgrade(request_type=dict)
-
-
-def test_rollback_node_pool_upgrade_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        client.rollback_node_pool_upgrade()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_async(transport: str = 'grpc_asyncio', request_type=cluster_service.RollbackNodePoolUpgradeRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.rollback_node_pool_upgrade(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.RollbackNodePoolUpgradeRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_async_from_dict():
-    await test_rollback_node_pool_upgrade_async(request_type=dict)
-
-
-def test_rollback_node_pool_upgrade_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.RollbackNodePoolUpgradeRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.rollback_node_pool_upgrade(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.RollbackNodePoolUpgradeRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.rollback_node_pool_upgrade(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_rollback_node_pool_upgrade_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.rollback_node_pool_upgrade(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].node_pool_id == 'node_pool_id_value'
-
-
-def test_rollback_node_pool_upgrade_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.rollback_node_pool_upgrade(
-            cluster_service.RollbackNodePoolUpgradeRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.rollback_node_pool_upgrade),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.rollback_node_pool_upgrade(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].node_pool_id == 'node_pool_id_value'
-
-
-@pytest.mark.asyncio
-async def test_rollback_node_pool_upgrade_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.rollback_node_pool_upgrade(
-            cluster_service.RollbackNodePoolUpgradeRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-        )
-
-
-def test_set_node_pool_management(transport: str = 'grpc', request_type=cluster_service.SetNodePoolManagementRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_node_pool_management(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolManagementRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_node_pool_management_from_dict():
-    test_set_node_pool_management(request_type=dict)
-
-
-def test_set_node_pool_management_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        client.set_node_pool_management()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolManagementRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolManagementRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_node_pool_management(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolManagementRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_async_from_dict():
-    await test_set_node_pool_management_async(request_type=dict)
-
-
-def test_set_node_pool_management_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNodePoolManagementRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_node_pool_management(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNodePoolManagementRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_node_pool_management(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_node_pool_management_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_node_pool_management(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-            management=cluster_service.NodeManagement(auto_upgrade=True),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].node_pool_id == 'node_pool_id_value'
-        assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True)
-
-
-def test_set_node_pool_management_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_node_pool_management(
-            cluster_service.SetNodePoolManagementRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-            management=cluster_service.NodeManagement(auto_upgrade=True),
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_management),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_node_pool_management(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-            management=cluster_service.NodeManagement(auto_upgrade=True),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].node_pool_id == 'node_pool_id_value'
-        assert args[0].management == cluster_service.NodeManagement(auto_upgrade=True)
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_management_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_node_pool_management(
-            cluster_service.SetNodePoolManagementRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            node_pool_id='node_pool_id_value',
-            management=cluster_service.NodeManagement(auto_upgrade=True),
-        )
-
-
-def test_set_labels(transport: str = 'grpc', request_type=cluster_service.SetLabelsRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_labels(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLabelsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_labels_from_dict():
-    test_set_labels(request_type=dict)
-
-
-def test_set_labels_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        client.set_labels()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLabelsRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_labels_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLabelsRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_labels(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLabelsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_labels_async_from_dict():
-    await test_set_labels_async(request_type=dict)
-
-
-def test_set_labels_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLabelsRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_labels(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_labels_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLabelsRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_labels(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_labels_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_labels(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            resource_labels={'key_value': 'value_value'},
-            label_fingerprint='label_fingerprint_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].resource_labels == {'key_value': 'value_value'}
-        assert args[0].label_fingerprint == 'label_fingerprint_value'
-
-
-def test_set_labels_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_labels(
-            cluster_service.SetLabelsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            resource_labels={'key_value': 'value_value'},
-            label_fingerprint='label_fingerprint_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_labels_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_labels),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_labels(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            resource_labels={'key_value': 'value_value'},
-            label_fingerprint='label_fingerprint_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].resource_labels == {'key_value': 'value_value'}
-        assert args[0].label_fingerprint == 'label_fingerprint_value'
-
-
-@pytest.mark.asyncio
-async def test_set_labels_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_labels(
-            cluster_service.SetLabelsRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            resource_labels={'key_value': 'value_value'},
-            label_fingerprint='label_fingerprint_value',
-        )
-
-
-def test_set_legacy_abac(transport: str = 'grpc', request_type=cluster_service.SetLegacyAbacRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.set_legacy_abac(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLegacyAbacRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_set_legacy_abac_from_dict():
-    test_set_legacy_abac(request_type=dict)
-
-
-def test_set_legacy_abac_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        client.set_legacy_abac()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLegacyAbacRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetLegacyAbacRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_legacy_abac(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetLegacyAbacRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_async_from_dict():
-    await test_set_legacy_abac_async(request_type=dict)
-
-
-def test_set_legacy_abac_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLegacyAbacRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_legacy_abac(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetLegacyAbacRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_legacy_abac(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_legacy_abac_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_legacy_abac(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            enabled=True,
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].enabled == True
-
-
-def test_set_legacy_abac_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_legacy_abac(
-            cluster_service.SetLegacyAbacRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            enabled=True,
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_legacy_abac),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_legacy_abac(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            enabled=True,
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].enabled == True
-
-
-@pytest.mark.asyncio
-async def test_set_legacy_abac_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_legacy_abac(
-            cluster_service.SetLegacyAbacRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            enabled=True,
-        )
-
-
-def test_start_ip_rotation(transport: str = 'grpc', request_type=cluster_service.StartIPRotationRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.start_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.StartIPRotationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_start_ip_rotation_from_dict():
-    test_start_ip_rotation(request_type=dict)
-
-
-def test_start_ip_rotation_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        client.start_ip_rotation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.StartIPRotationRequest()
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.StartIPRotationRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.start_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.StartIPRotationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_async_from_dict():
-    await test_start_ip_rotation_async(request_type=dict)
-
-
-def test_start_ip_rotation_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.StartIPRotationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.start_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.StartIPRotationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.start_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_start_ip_rotation_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.start_ip_rotation(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-
-
-def test_start_ip_rotation_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.start_ip_rotation(
-            cluster_service.StartIPRotationRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.start_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.start_ip_rotation(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-
-
-@pytest.mark.asyncio
-async def test_start_ip_rotation_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.start_ip_rotation(
-            cluster_service.StartIPRotationRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-
-def test_complete_ip_rotation(transport: str = 'grpc', request_type=cluster_service.CompleteIPRotationRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        )
-        response = client.complete_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CompleteIPRotationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-def test_complete_ip_rotation_from_dict():
-    test_complete_ip_rotation(request_type=dict)
-
-
-def test_complete_ip_rotation_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        client.complete_ip_rotation()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CompleteIPRotationRequest()
-
-
-@pytest.mark.asyncio
-async def test_complete_ip_rotation_async(transport: str = 'grpc_asyncio', request_type=cluster_service.CompleteIPRotationRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.complete_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.CompleteIPRotationRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_complete_ip_rotation_async_from_dict():
-    await test_complete_ip_rotation_async(request_type=dict)
-
-
-def test_complete_ip_rotation_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.CompleteIPRotationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.complete_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_complete_ip_rotation_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.CompleteIPRotationRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.complete_ip_rotation(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_complete_ip_rotation_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.complete_ip_rotation(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-
-
-def test_complete_ip_rotation_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.complete_ip_rotation(
-            cluster_service.CompleteIPRotationRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_complete_ip_rotation_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.complete_ip_rotation),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.complete_ip_rotation(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - - -@pytest.mark.asyncio -async def test_complete_ip_rotation_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.complete_ip_rotation( - cluster_service.CompleteIPRotationRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - ) - - -def test_set_node_pool_size(transport: str = 'grpc', request_type=cluster_service.SetNodePoolSizeRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_size), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_node_pool_size(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNodePoolSizeRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_node_pool_size_from_dict(): - test_set_node_pool_size(request_type=dict) - - -def test_set_node_pool_size_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
-    with mock.patch.object(
-            type(client.transport.set_node_pool_size),
-            '__call__') as call:
-        client.set_node_pool_size()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolSizeRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_size_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNodePoolSizeRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_size),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_node_pool_size(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNodePoolSizeRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_node_pool_size_async_from_dict():
-    await test_set_node_pool_size_async(request_type=dict)
-
-
-def test_set_node_pool_size_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNodePoolSizeRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_node_pool_size),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_node_pool_size(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_node_pool_size_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetNodePoolSizeRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_node_pool_size), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_node_pool_size(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_network_policy(transport: str = 'grpc', request_type=cluster_service.SetNetworkPolicyRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_network_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_network_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetNetworkPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_network_policy_from_dict(): - test_set_network_policy(request_type=dict) - - -def test_set_network_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
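-    # (Anonymous credentials keep the test hermetic; no ADC lookup occurs.)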
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        client.set_network_policy()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNetworkPolicyRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_network_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetNetworkPolicyRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_network_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetNetworkPolicyRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_network_policy_async_from_dict():
-    await test_set_network_policy_async(request_type=dict)
-
-
-def test_set_network_policy_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNetworkPolicyRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_network_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_set_network_policy_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetNetworkPolicyRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        await client.set_network_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'name=name/value',
-    ) in kw['metadata']
-
-
-def test_set_network_policy_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.Operation()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.set_network_policy(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO)
-
-
-def test_set_network_policy_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.set_network_policy(
-            cluster_service.SetNetworkPolicyRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO),
-        )
-
-
-@pytest.mark.asyncio
-async def test_set_network_policy_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_network_policy),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
- response = await client.set_network_policy( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].network_policy == cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO) - - -@pytest.mark.asyncio -async def test_set_network_policy_flattened_error_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_network_policy( - cluster_service.SetNetworkPolicyRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - network_policy=cluster_service.NetworkPolicy(provider=cluster_service.NetworkPolicy.Provider.CALICO), - ) - - -def test_set_maintenance_policy(transport: str = 'grpc', request_type=cluster_service.SetMaintenancePolicyRequest): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation( - name='name_value', - zone='zone_value', - operation_type=cluster_service.Operation.Type.CREATE_CLUSTER, - status=cluster_service.Operation.Status.PENDING, - detail='detail_value', - status_message='status_message_value', - self_link='self_link_value', - target_link='target_link_value', - location='location_value', - start_time='start_time_value', - end_time='end_time_value', - ) - response = client.set_maintenance_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == cluster_service.SetMaintenancePolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, cluster_service.Operation) - assert response.name == 'name_value' - assert response.zone == 'zone_value' - assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER - assert response.status == cluster_service.Operation.Status.PENDING - assert response.detail == 'detail_value' - assert response.status_message == 'status_message_value' - assert response.self_link == 'self_link_value' - assert response.target_link == 'target_link_value' - assert response.location == 'location_value' - assert response.start_time == 'start_time_value' - assert response.end_time == 'end_time_value' - - -def test_set_maintenance_policy_from_dict(): - test_set_maintenance_policy(request_type=dict) - - -def test_set_maintenance_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_maintenance_policy),
-            '__call__') as call:
-        client.set_maintenance_policy()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMaintenancePolicyRequest()
-
-
-@pytest.mark.asyncio
-async def test_set_maintenance_policy_async(transport: str = 'grpc_asyncio', request_type=cluster_service.SetMaintenancePolicyRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_maintenance_policy),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation(
-            name='name_value',
-            zone='zone_value',
-            operation_type=cluster_service.Operation.Type.CREATE_CLUSTER,
-            status=cluster_service.Operation.Status.PENDING,
-            detail='detail_value',
-            status_message='status_message_value',
-            self_link='self_link_value',
-            target_link='target_link_value',
-            location='location_value',
-            start_time='start_time_value',
-            end_time='end_time_value',
-        ))
-        response = await client.set_maintenance_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.SetMaintenancePolicyRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.Operation)
-    assert response.name == 'name_value'
-    assert response.zone == 'zone_value'
-    assert response.operation_type == cluster_service.Operation.Type.CREATE_CLUSTER
-    assert response.status == cluster_service.Operation.Status.PENDING
-    assert response.detail == 'detail_value'
-    assert response.status_message == 'status_message_value'
-    assert response.self_link == 'self_link_value'
-    assert response.target_link == 'target_link_value'
-    assert response.location == 'location_value'
-    assert response.start_time == 'start_time_value'
-    assert response.end_time == 'end_time_value'
-
-
-@pytest.mark.asyncio
-async def test_set_maintenance_policy_async_from_dict():
-    await test_set_maintenance_policy_async(request_type=dict)
-
-
-def test_set_maintenance_policy_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.SetMaintenancePolicyRequest()
-
-    request.name = 'name/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.set_maintenance_policy),
-            '__call__') as call:
-        call.return_value = cluster_service.Operation()
-        client.set_maintenance_policy(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_maintenance_policy_field_headers_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = cluster_service.SetMaintenancePolicyRequest() - - request.name = 'name/value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation()) - await client.set_maintenance_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name/value', - ) in kw['metadata'] - - -def test_set_maintenance_policy_flattened(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = cluster_service.Operation() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_maintenance_policy( - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0].project_id == 'project_id_value' - assert args[0].zone == 'zone_value' - assert args[0].cluster_id == 'cluster_id_value' - assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))) - - -def test_set_maintenance_policy_flattened_error(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_maintenance_policy( - cluster_service.SetMaintenancePolicyRequest(), - project_id='project_id_value', - zone='zone_value', - cluster_id='cluster_id_value', - maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))), - ) - - -@pytest.mark.asyncio -async def test_set_maintenance_policy_flattened_async(): - client = ClusterManagerAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_maintenance_policy), - '__call__') as call: - # Designate an appropriate return value for the call. 
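-        # (FakeUnaryUnaryCall wraps the message in an awaitable, matching
-        # what the async transport would hand back.)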
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.Operation())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.set_maintenance_policy(
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))),
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].project_id == 'project_id_value'
-        assert args[0].zone == 'zone_value'
-        assert args[0].cluster_id == 'cluster_id_value'
-        assert args[0].maintenance_policy == cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value')))
-
-
-@pytest.mark.asyncio
-async def test_set_maintenance_policy_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.set_maintenance_policy(
-            cluster_service.SetMaintenancePolicyRequest(),
-            project_id='project_id_value',
-            zone='zone_value',
-            cluster_id='cluster_id_value',
-            maintenance_policy=cluster_service.MaintenancePolicy(window=cluster_service.MaintenanceWindow(daily_maintenance_window=cluster_service.DailyMaintenanceWindow(start_time='start_time_value'))),
-        )
-
-
-def test_list_usable_subnetworks(transport: str = 'grpc', request_type=cluster_service.ListUsableSubnetworksRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListUsableSubnetworksResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListUsableSubnetworksPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_usable_subnetworks_from_dict():
-    test_list_usable_subnetworks(request_type=dict)
-
-
-def test_list_usable_subnetworks_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        client.list_usable_subnetworks()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListUsableSubnetworksRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListUsableSubnetworksRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, pagers.ListUsableSubnetworksAsyncPager)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async_from_dict():
-    await test_list_usable_subnetworks_async(request_type=dict)
-
-
-def test_list_usable_subnetworks_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListUsableSubnetworksRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        call.return_value = cluster_service.ListUsableSubnetworksResponse()
-        client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListUsableSubnetworksRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse())
-        await client.list_usable_subnetworks(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_usable_subnetworks_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListUsableSubnetworksResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_usable_subnetworks(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_usable_subnetworks_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_usable_subnetworks(
-            cluster_service.ListUsableSubnetworksRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListUsableSubnetworksResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_usable_subnetworks(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_usable_subnetworks(
-            cluster_service.ListUsableSubnetworksRequest(),
-            parent='parent_value',
-        )
-
-
-def test_list_usable_subnetworks_pager():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Set the response to a series of pages.
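-        # (side_effect yields one response per call; the trailing
-        # RuntimeError fails fast if the pager over-fetches.)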
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-
-        metadata = ()
-        metadata = tuple(metadata) + (
-            gapic_v1.routing_header.to_grpc_metadata((
-                ('parent', ''),
-            )),
-        )
-        pager = client.list_usable_subnetworks(request={})
-
-        assert pager._metadata == metadata
-
-        results = [i for i in pager]
-        assert len(results) == 6
-        assert all(isinstance(i, cluster_service.UsableSubnetwork)
-                   for i in results)
-
-def test_list_usable_subnetworks_pages():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__') as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = list(client.list_usable_subnetworks(request={}).pages)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async_pager():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
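-        # (With new_callable=mock.AsyncMock the staged responses come back
-        # from an awaited call, so no FakeUnaryUnaryCall wrapper is needed.)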
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-        async_pager = await client.list_usable_subnetworks(request={},)
-        assert async_pager.next_page_token == 'abc'
-        responses = []
-        async for response in async_pager:
-            responses.append(response)
-
-        assert len(responses) == 6
-        assert all(isinstance(i, cluster_service.UsableSubnetwork)
-                   for i in responses)
-
-@pytest.mark.asyncio
-async def test_list_usable_subnetworks_async_pages():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_usable_subnetworks),
-            '__call__', new_callable=mock.AsyncMock) as call:
-        # Set the response to a series of pages.
-        call.side_effect = (
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='abc',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[],
-                next_page_token='def',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                ],
-                next_page_token='ghi',
-            ),
-            cluster_service.ListUsableSubnetworksResponse(
-                subnetworks=[
-                    cluster_service.UsableSubnetwork(),
-                    cluster_service.UsableSubnetwork(),
-                ],
-            ),
-            RuntimeError,
-        )
-        pages = []
-        async for page_ in (await client.list_usable_subnetworks(request={})).pages:
-            pages.append(page_)
-        for page_, token in zip(pages, ['abc','def','ghi', '']):
-            assert page_.raw_page.next_page_token == token
-
-def test_list_locations(transport: str = 'grpc', request_type=cluster_service.ListLocationsRequest):
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListLocationsResponse(
-            next_page_token='next_page_token_value',
-        )
-        response = client.list_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListLocationsRequest()
-
-    # Establish that the response is the type that we expect.
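-    # (list_locations is not auto-paginated, so the response doubles as its
-    # own raw_page, mirroring the paged-method interface.)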
-    assert response.raw_page is response
-    assert isinstance(response, cluster_service.ListLocationsResponse)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-def test_list_locations_from_dict():
-    test_list_locations(request_type=dict)
-
-
-def test_list_locations_empty_call():
-    # This test is a coverage failsafe to make sure that totally empty calls,
-    # i.e. request == None and no flattened fields passed, work.
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport='grpc',
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        client.list_locations()
-        call.assert_called()
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListLocationsRequest()
-
-
-@pytest.mark.asyncio
-async def test_list_locations_async(transport: str = 'grpc_asyncio', request_type=cluster_service.ListLocationsRequest):
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-        transport=transport,
-    )
-
-    # Everything is optional in proto3 as far as the runtime is concerned,
-    # and we are mocking out the actual API, so just send an empty request.
-    request = request_type()
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListLocationsResponse(
-            next_page_token='next_page_token_value',
-        ))
-        response = await client.list_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == cluster_service.ListLocationsRequest()
-
-    # Establish that the response is the type that we expect.
-    assert isinstance(response, cluster_service.ListLocationsResponse)
-    assert response.next_page_token == 'next_page_token_value'
-
-
-@pytest.mark.asyncio
-async def test_list_locations_async_from_dict():
-    await test_list_locations_async(request_type=dict)
-
-
-def test_list_locations_field_headers():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListLocationsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        call.return_value = cluster_service.ListLocationsResponse()
-        client.list_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-@pytest.mark.asyncio
-async def test_list_locations_field_headers_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Any value that is part of the HTTP/1.1 URI should be sent as
-    # a field header. Set these to a non-empty value.
-    request = cluster_service.ListLocationsRequest()
-
-    request.parent = 'parent/value'
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListLocationsResponse())
-        await client.list_locations(request)
-
-        # Establish that the underlying gRPC stub method was called.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0] == request
-
-    # Establish that the field header was sent.
-    _, _, kw = call.mock_calls[0]
-    assert (
-        'x-goog-request-params',
-        'parent=parent/value',
-    ) in kw['metadata']
-
-
-def test_list_locations_flattened():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = cluster_service.ListLocationsResponse()
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        client.list_locations(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls) == 1
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-def test_list_locations_flattened_error():
-    client = ClusterManagerClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.list_locations(
-            cluster_service.ListLocationsRequest(),
-            parent='parent_value',
-        )
-
-
-@pytest.mark.asyncio
-async def test_list_locations_flattened_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Mock the actual call within the gRPC stub, and fake the request.
-    with mock.patch.object(
-            type(client.transport.list_locations),
-            '__call__') as call:
-        # Designate an appropriate return value for the call.
-        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(cluster_service.ListLocationsResponse())
-        # Call the method with a truthy value for each flattened field,
-        # using the keyword arguments to the method.
-        response = await client.list_locations(
-            parent='parent_value',
-        )
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(call.mock_calls)
-        _, args, _ = call.mock_calls[0]
-        assert args[0].parent == 'parent_value'
-
-
-@pytest.mark.asyncio
-async def test_list_locations_flattened_error_async():
-    client = ClusterManagerAsyncClient(
-        credentials=ga_credentials.AnonymousCredentials(),
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        await client.list_locations(
-            cluster_service.ListLocationsRequest(),
-            parent='parent_value',
-        )
-
-
-def test_credentials_transport_error():
-    # It is an error to provide credentials and a transport instance.
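-    # (A transport instance already carries its own credentials, so a second
-    # set supplied to the client would be ambiguous and must be rejected.)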
- transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterManagerClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - with pytest.raises(ValueError): - client = ClusterManagerClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - client = ClusterManagerClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.ClusterManagerGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.ClusterManagerGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.ClusterManagerGrpcTransport, - transports.ClusterManagerGrpcAsyncIOTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class() - adc.assert_called_once() - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - ) - assert isinstance( - client.transport, - transports.ClusterManagerGrpcTransport, - ) - -def test_cluster_manager_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.ClusterManagerTransport( - credentials=ga_credentials.AnonymousCredentials(), - credentials_file="credentials.json" - ) - - -def test_cluster_manager_base_transport(): - # Instantiate the base transport. - with mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.ClusterManagerTransport( - credentials=ga_credentials.AnonymousCredentials(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
-    methods = (
-        'list_clusters',
-        'get_cluster',
-        'create_cluster',
-        'update_cluster',
-        'update_node_pool',
-        'set_node_pool_autoscaling',
-        'set_logging_service',
-        'set_monitoring_service',
-        'set_addons_config',
-        'set_locations',
-        'update_master',
-        'set_master_auth',
-        'delete_cluster',
-        'list_operations',
-        'get_operation',
-        'cancel_operation',
-        'get_server_config',
-        'list_node_pools',
-        'get_json_web_keys',
-        'get_node_pool',
-        'create_node_pool',
-        'delete_node_pool',
-        'rollback_node_pool_upgrade',
-        'set_node_pool_management',
-        'set_labels',
-        'set_legacy_abac',
-        'start_ip_rotation',
-        'complete_ip_rotation',
-        'set_node_pool_size',
-        'set_network_policy',
-        'set_maintenance_policy',
-        'list_usable_subnetworks',
-        'list_locations',
-    )
-    for method in methods:
-        with pytest.raises(NotImplementedError):
-            getattr(transport, method)(request=object())
-
-
-@requires_google_auth_gte_1_25_0
-def test_cluster_manager_base_transport_with_credentials_file():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.ClusterManagerTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=None,
-            default_scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-@requires_google_auth_lt_1_25_0
-def test_cluster_manager_base_transport_with_credentials_file_old_google_auth():
-    # Instantiate the base transport with a credentials file
-    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.ClusterManagerTransport(
-            credentials_file="credentials.json",
-            quota_project_id="octopus",
-        )
-        load_creds.assert_called_once_with("credentials.json",
-            scopes=(
-                'https://www.googleapis.com/auth/cloud-platform',
-            ),
-            quota_project_id="octopus",
-        )
-
-
-def test_cluster_manager_base_transport_with_adc():
-    # Test the default credentials are used if credentials and credentials_file are None.
-    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.container_v1beta1.services.cluster_manager.transports.ClusterManagerTransport._prep_wrapped_messages') as Transport:
-        Transport.return_value = None
-        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
-        transport = transports.ClusterManagerTransport()
-        adc.assert_called_once()
-
-
-@requires_google_auth_gte_1_25_0
-def test_cluster_manager_auth_adc():
-    # If no credentials are provided, we should use ADC credentials.
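-    # (google-auth 1.25.0 introduced the 'default_scopes' argument, hence the
-    # version-gated pair of assertions.)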
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterManagerClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id=None, - ) - - -@requires_google_auth_lt_1_25_0 -def test_cluster_manager_auth_adc_old_google_auth(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - ClusterManagerClient() - adc.assert_called_once_with( - scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterManagerGrpcTransport, - transports.ClusterManagerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_gte_1_25_0 -def test_cluster_manager_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/cloud-platform',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.ClusterManagerGrpcTransport, - transports.ClusterManagerGrpcAsyncIOTransport, - ], -) -@requires_google_auth_lt_1_25_0 -def test_cluster_manager_transport_auth_adc_old_google_auth(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. - with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) - transport_class(quota_project_id="octopus") - adc.assert_called_once_with(scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.ClusterManagerGrpcTransport, grpc_helpers), - (transports.ClusterManagerGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_cluster_manager_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "container.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/cloud-platform', -), - scopes=["1", "2"], - default_host="container.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) -def test_cluster_manager_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = ga_credentials.AnonymousCredentials() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - - -def test_cluster_manager_host_no_port(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com'), - ) - assert client.transport._host == 'container.googleapis.com:443' - - -def test_cluster_manager_host_with_port(): - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_options=client_options.ClientOptions(api_endpoint='container.googleapis.com:8000'), - ) - assert client.transport._host == 'container.googleapis.com:8000' - -def test_cluster_manager_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.ClusterManagerGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_cluster_manager_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.ClusterManagerGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) -def test_cluster_manager_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = ga_credentials.AnonymousCredentials() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
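The two channel tests above, together with test_cluster_manager_grpc_transport_client_cert_source_for_mtls earlier, cover both halves of the mTLS story: the deprecated constructor arguments (api_mtls_endpoint, client_cert_source) and their replacement, client_cert_source_for_mtls. For reference, a minimal sketch of the non-deprecated configuration, assuming a hypothetical load_client_cert() callback that returns a (cert_bytes, key_bytes) pair:

from google.auth import credentials as ga_credentials
from google.container_v1beta1.services.cluster_manager import transports

def load_client_cert():
    # Hypothetical callback for illustration only; a real one would read the
    # client certificate and private key from disk or a secure store.
    return b"cert bytes", b"key bytes"

# The transport wraps the callback's cert/key pair in
# grpc.ssl_channel_credentials(), as asserted in the test above, and uses the
# result when it opens its channel.
transport = transports.ClusterManagerGrpcTransport(
    credentials=ga_credentials.AnonymousCredentials(),
    client_cert_source_for_mtls=load_client_cert,
)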
-@pytest.mark.parametrize("transport_class", [transports.ClusterManagerGrpcTransport, transports.ClusterManagerGrpcAsyncIOTransport]) -def test_cluster_manager_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_topic_path(): - project = "squid" - topic = "clam" - expected = "projects/{project}/topics/{topic}".format(project=project, topic=topic, ) - actual = ClusterManagerClient.topic_path(project, topic) - assert expected == actual - - -def test_parse_topic_path(): - expected = { - "project": "whelk", - "topic": "octopus", - } - path = ClusterManagerClient.topic_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_topic_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "oyster" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = ClusterManagerClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "nudibranch", - } - path = ClusterManagerClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "cuttlefish" - expected = "folders/{folder}".format(folder=folder, ) - actual = ClusterManagerClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "mussel", - } - path = ClusterManagerClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "winkle" - expected = "organizations/{organization}".format(organization=organization, ) - actual = ClusterManagerClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "nautilus", - } - path = ClusterManagerClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = ClusterManagerClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "scallop" - expected = "projects/{project}".format(project=project, ) - actual = ClusterManagerClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "abalone", - } - path = ClusterManagerClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "squid" - location = "clam" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = ClusterManagerClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "whelk", - "location": "octopus", - } - path = ClusterManagerClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = ClusterManagerClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_withDEFAULT_CLIENT_INFO(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: - client = ClusterManagerClient( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.ClusterManagerTransport, '_prep_wrapped_messages') as prep: - transport_class = ClusterManagerClient.get_transport_class() - transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) diff --git a/tests/unit/gapic/container_v1/test_cluster_manager.py b/tests/unit/gapic/container_v1/test_cluster_manager.py index 28cc1f9f..56618242 100644 --- a/tests/unit/gapic/container_v1/test_cluster_manager.py +++ b/tests/unit/gapic/container_v1/test_cluster_manager.py @@ -119,18 +119,6 @@ def test_cluster_manager_client_from_service_account_info(client_class): assert client.transport._host == "container.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [ClusterManagerClient, ClusterManagerAsyncClient,] -) -def test_cluster_manager_client_service_account_always_use_jwt(client_class): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - @pytest.mark.parametrize( "transport_class,transport_name", [ @@ -138,7 +126,7 @@ def test_cluster_manager_client_service_account_always_use_jwt(client_class): (transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), ], ) -def test_cluster_manager_client_service_account_always_use_jwt_true( +def test_cluster_manager_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -148,6 +136,13 @@ def test_cluster_manager_client_service_account_always_use_jwt_true( transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + @pytest.mark.parametrize( "client_class", [ClusterManagerClient, ClusterManagerAsyncClient,] @@ -228,6 +223,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -244,6 +240,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -260,6 +257,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -288,6 +286,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -354,6 +353,7 @@ def test_cluster_manager_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -387,6 +387,7 @@ def test_cluster_manager_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. 
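Each hunk in this file makes the same change: the expected transport constructor call now includes always_use_jwt_access=True, matching the new client behavior this patch enables. What that flag ultimately amounts to, sketched with google-auth's service-account credentials (the key-file path below is a placeholder, not a file from this repo; with_always_use_jwt_access requires a recent google-auth, which is why the tests mock it with create=True):

from google.oauth2 import service_account

# Placeholder path for illustration.
creds = service_account.Credentials.from_service_account_file(
    "service-account.json",
)
# With always-use-JWT enabled, the credentials sign a self-signed JWT for the
# API audience instead of exchanging the private key for an OAuth2 access
# token, which removes a network round trip to the token endpoint.
creds = creds.with_always_use_jwt_access(True)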
@@ -408,6 +409,7 @@ def test_cluster_manager_client_mtls_env_auto( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -438,6 +440,7 @@ def test_cluster_manager_client_client_options_scopes( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -468,6 +471,7 @@ def test_cluster_manager_client_client_options_credentials_file( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -487,6 +491,7 @@ def test_cluster_manager_client_client_options_from_dict(): client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) diff --git a/tests/unit/gapic/container_v1beta1/test_cluster_manager.py b/tests/unit/gapic/container_v1beta1/test_cluster_manager.py index 41a7277b..40533eae 100644 --- a/tests/unit/gapic/container_v1beta1/test_cluster_manager.py +++ b/tests/unit/gapic/container_v1beta1/test_cluster_manager.py @@ -124,18 +124,6 @@ def test_cluster_manager_client_from_service_account_info(client_class): assert client.transport._host == "container.googleapis.com:443" -@pytest.mark.parametrize( - "client_class", [ClusterManagerClient, ClusterManagerAsyncClient,] -) -def test_cluster_manager_client_service_account_always_use_jwt(client_class): - with mock.patch.object( - service_account.Credentials, "with_always_use_jwt_access", create=True - ) as use_jwt: - creds = service_account.Credentials(None, None, None) - client = client_class(credentials=creds) - use_jwt.assert_not_called() - - @pytest.mark.parametrize( "transport_class,transport_name", [ @@ -143,7 +131,7 @@ def test_cluster_manager_client_service_account_always_use_jwt(client_class): (transports.ClusterManagerGrpcAsyncIOTransport, "grpc_asyncio"), ], ) -def test_cluster_manager_client_service_account_always_use_jwt_true( +def test_cluster_manager_client_service_account_always_use_jwt( transport_class, transport_name ): with mock.patch.object( @@ -153,6 +141,13 @@ def test_cluster_manager_client_service_account_always_use_jwt_true( transport = transport_class(credentials=creds, always_use_jwt_access=True) use_jwt.assert_called_once_with(True) + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + @pytest.mark.parametrize( "client_class", [ClusterManagerClient, ClusterManagerAsyncClient,] @@ -233,6 +228,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -249,6 +245,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -265,6 +262,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case api_endpoint is 
not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -293,6 +291,7 @@ def test_cluster_manager_client_client_options( client_cert_source_for_mtls=None, quota_project_id="octopus", client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -359,6 +358,7 @@ def test_cluster_manager_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -392,6 +392,7 @@ def test_cluster_manager_client_mtls_env_auto( client_cert_source_for_mtls=expected_client_cert_source, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) # Check the case client_cert_source and ADC client cert are not provided. @@ -413,6 +414,7 @@ def test_cluster_manager_client_mtls_env_auto( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -443,6 +445,7 @@ def test_cluster_manager_client_client_options_scopes( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -473,6 +476,7 @@ def test_cluster_manager_client_client_options_credentials_file( client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, ) @@ -492,6 +496,7 @@ def test_cluster_manager_client_client_options_from_dict(): client_cert_source_for_mtls=None, quota_project_id=None, client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, )
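The v1beta1 hunks mirror the v1 ones line for line, and every remaining hunk asserts the same enlarged keyword set, so one schematic example covers them all. The endpoint value here is illustrative and the pattern follows the patched tests:

from unittest import mock

from google.api_core import client_options
from google.container_v1beta1.services.cluster_manager import (
    ClusterManagerClient,
    transports,
)

# Patch the transport constructor so no real channel is created, then verify
# the full keyword set the client passes through, including the new
# always_use_jwt_access=True.
options = client_options.ClientOptions(api_endpoint="container.googleapis.com")
with mock.patch.object(
    transports.ClusterManagerGrpcTransport, "__init__"
) as patched:
    patched.return_value = None
    ClusterManagerClient(client_options=options)
    patched.assert_called_once_with(
        credentials=None,
        credentials_file=None,
        host="container.googleapis.com",
        scopes=None,
        client_cert_source_for_mtls=None,
        quota_project_id=None,
        client_info=transports.base.DEFAULT_CLIENT_INFO,
        always_use_jwt_access=True,
    )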